summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rwxr-xr-xconfigure1
-rw-r--r--vp9/common/vp9_blockd.h7
-rw-r--r--vp9/common/vp9_entropymode.c5
-rw-r--r--vp9/common/vp9_entropymv.c5
-rw-r--r--vp9/common/vp9_entropymv.h6
-rw-r--r--vp9/common/vp9_mvref_common.c190
-rw-r--r--vp9/common/x86/vp9_asm_stubs.c91
-rw-r--r--vp9/decoder/vp9_decodemv.c39
-rw-r--r--vp9/decoder/vp9_decodframe.c148
-rw-r--r--vp9/encoder/vp9_bitstream.c132
-rw-r--r--vp9/encoder/vp9_encodeframe.c120
-rw-r--r--vp9/encoder/vp9_encodemb.c6
-rw-r--r--vp9/encoder/vp9_firstpass.c50
-rw-r--r--vp9/encoder/vp9_mcomp.c6
-rw-r--r--vp9/encoder/vp9_onyx_if.c76
-rw-r--r--vp9/encoder/vp9_onyx_int.h4
-rw-r--r--vp9/encoder/vp9_rdopt.c77
-rw-r--r--vp9/encoder/vp9_tokenize.c20
-rw-r--r--vp9/encoder/vp9_tokenize.h8
19 files changed, 294 insertions, 697 deletions
diff --git a/configure b/configure
index 0678e3ea7..5d2910ba7 100755
--- a/configure
+++ b/configure
@@ -239,7 +239,6 @@ HAVE_LIST="
"
EXPERIMENT_LIST="
csm
- new_mvref
implicit_segmentation
newbintramodes
comp_interintra_pred
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index e124489ef..107517a9b 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -261,9 +261,6 @@ typedef struct {
int_mv mv[2]; // for each reference frame used
int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
int_mv best_mv, best_second_mv;
-#if CONFIG_NEW_MVREF
- int best_index, best_second_index;
-#endif
int mb_mode_context[MAX_REF_FRAMES];
@@ -406,10 +403,6 @@ typedef struct macroblockd {
vp9_prob mb_segment_tree_probs[MB_FEATURE_TREE_PROBS];
vp9_prob mb_segment_mispred_tree_probs[MAX_MB_SEGMENTS];
-#if CONFIG_NEW_MVREF
- vp9_prob mb_mv_ref_probs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES-1];
-#endif
-
// Segment features
signed char segment_feature_data[MAX_MB_SEGMENTS][SEG_LVL_MAX];
unsigned int segment_feature_mask[MAX_MB_SEGMENTS];
diff --git a/vp9/common/vp9_entropymode.c b/vp9/common/vp9_entropymode.c
index 0db2de6ee..f19dc12d3 100644
--- a/vp9/common/vp9_entropymode.c
+++ b/vp9/common/vp9_entropymode.c
@@ -718,11 +718,6 @@ void vp9_setup_past_independence(VP9_COMMON *cm, MACROBLOCKD *xd) {
vp9_update_mode_info_border(cm, cm->mip);
vp9_update_mode_info_in_image(cm, cm->mi);
-#if CONFIG_NEW_MVREF
- // Defaults probabilities for encoding the MV ref id signal
- vpx_memset(xd->mb_mv_ref_probs, VP9_DEFAULT_MV_REF_PROB,
- sizeof(xd->mb_mv_ref_probs));
-#endif
cm->ref_frame_sign_bias[GOLDEN_FRAME] = 0;
cm->ref_frame_sign_bias[ALTREF_FRAME] = 0;
diff --git a/vp9/common/vp9_entropymv.c b/vp9/common/vp9_entropymv.c
index 0a81015cb..320c29c6a 100644
--- a/vp9/common/vp9_entropymv.c
+++ b/vp9/common/vp9_entropymv.c
@@ -17,13 +17,8 @@
#define MV_COUNT_SAT 16
#define MV_MAX_UPDATE_FACTOR 160
-#if CONFIG_NEW_MVREF
-/* Integer pel reference mv threshold for use of high-precision 1/8 mv */
-#define COMPANDED_MVREF_THRESH 1000000
-#else
/* Integer pel reference mv threshold for use of high-precision 1/8 mv */
#define COMPANDED_MVREF_THRESH 8
-#endif
/* Smooth or bias the mv-counts before prob computation */
/* #define SMOOTH_MV_COUNTS */
diff --git a/vp9/common/vp9_entropymv.h b/vp9/common/vp9_entropymv.h
index de1bd4383..118574b62 100644
--- a/vp9/common/vp9_entropymv.h
+++ b/vp9/common/vp9_entropymv.h
@@ -26,12 +26,6 @@ int vp9_use_nmv_hp(const MV *ref);
#define VP9_NMV_UPDATE_PROB 255
-#if CONFIG_NEW_MVREF
-#define VP9_MVREF_UPDATE_PROB 252
-#define VP9_DEFAULT_MV_REF_PROB 192
-#define VP9_MV_REF_UPDATE_COST (14 << 8)
-#endif
-
//#define MV_GROUP_UPDATE
#define LOW_PRECISION_MV_UPDATE /* Use 7 bit forward update */
diff --git a/vp9/common/vp9_mvref_common.c b/vp9/common/vp9_mvref_common.c
index 666197366..d8ac68829 100644
--- a/vp9/common/vp9_mvref_common.c
+++ b/vp9/common/vp9_mvref_common.c
@@ -138,102 +138,25 @@ static void scale_mv(MACROBLOCKD *xd, MV_REFERENCE_FRAME this_ref_frame,
*/
}
-/*
-// Adds a new candidate reference vector to the sorted list.
-// If it is a repeat the weight of the existing entry is increased
-// and the order of the list is resorted.
-// This method of add plus sort has been deprecated for now as there is a
-// further sort of the best candidates in vp9_find_best_ref_mvs() and the
-// incremental benefit of both is small. If the decision is made to remove
-// the sort in vp9_find_best_ref_mvs() for performance reasons then it may be
-// worth re-instating some sort of list reordering by weight here.
-//
-static void addmv_and_shuffle(
- int_mv *mv_list,
- int *mv_scores,
- int *refmv_count,
- int_mv candidate_mv,
- int weight
-) {
-
- int i;
- int insert_point;
- int duplicate_found = 0;
-
- // Check for duplicates. If there is one increase its score.
- // We only compare vs the current top candidates.
- insert_point = (*refmv_count < (MAX_MV_REF_CANDIDATES - 1))
- ? *refmv_count : (MAX_MV_REF_CANDIDATES - 1);
-
- i = insert_point;
- if (*refmv_count > i)
- i++;
- while (i > 0) {
- i--;
- if (candidate_mv.as_int == mv_list[i].as_int) {
- duplicate_found = 1;
- mv_scores[i] += weight;
- break;
- }
- }
-
- // If no duplicate and the new candidate is good enough then add it.
- if (!duplicate_found ) {
- if (weight > mv_scores[insert_point]) {
- mv_list[insert_point].as_int = candidate_mv.as_int;
- mv_scores[insert_point] = weight;
- i = insert_point;
- }
- (*refmv_count)++;
- }
-
- // Reshuffle the list so that highest scoring mvs at the top.
- while (i > 0) {
- if (mv_scores[i] > mv_scores[i-1]) {
- int tmp_score = mv_scores[i-1];
- int_mv tmp_mv = mv_list[i-1];
-
- mv_scores[i-1] = mv_scores[i];
- mv_list[i-1] = mv_list[i];
- mv_scores[i] = tmp_score;
- mv_list[i] = tmp_mv;
- i--;
- } else
- break;
- }
-}
-*/
-
-// Adds a new candidate reference vector to the list.
-// The mv is thrown out if it is already in the list.
-// Unlike the addmv_and_shuffle() this does not reorder the list
-// but assumes that candidates are added in the order most likely to
-// match distance and reference frame bias.
+// Add a candidate mv.
+// Discard if it has already been seen.
static void add_candidate_mv(int_mv *mv_list, int *mv_scores,
int *candidate_count, int_mv candidate_mv,
int weight) {
- int i;
-
- // Make sure we dont insert off the end of the list
- const int insert_point = MIN(*candidate_count, MAX_MV_REF_CANDIDATES - 1);
-
- // Look for duplicates
- for (i = 0; i <= insert_point; ++i) {
- if (candidate_mv.as_int == mv_list[i].as_int)
- break;
- }
-
- // Add the candidate. If the list is already full it is only desirable that
- // it should overwrite if it has a higher weight than the last entry.
- if (i >= insert_point && weight > mv_scores[insert_point]) {
- mv_list[insert_point].as_int = candidate_mv.as_int;
- mv_scores[insert_point] = weight;
- *candidate_count += (*candidate_count < MAX_MV_REF_CANDIDATES);
+ if (*candidate_count == 0) {
+ mv_list[0].as_int = candidate_mv.as_int;
+ mv_scores[0] = weight;
+ *candidate_count += 1;
+ } else if ((*candidate_count == 1) &&
+ (candidate_mv.as_int != mv_list[0].as_int)) {
+ mv_list[1].as_int = candidate_mv.as_int;
+ mv_scores[1] = weight;
+ *candidate_count += 1;
}
}
-// This function searches the neighbourhood of a given MB/SB and populates a
-// list of candidate reference vectors.
+// This function searches the neighbourhood of a given MB/SB
+// to try and find candidate reference vectors.
//
void vp9_find_mv_refs(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
MODE_INFO *lf_here, MV_REFERENCE_FRAME ref_frame,
@@ -251,7 +174,6 @@ void vp9_find_mv_refs(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
int split_count = 0;
int (*mv_ref_search)[2];
int *ref_distance_weight;
- int zero_seen = 0;
const int mb_col = (-xd->mb_to_left_edge) >> 7;
// Blank the reference vector lists and other local structures.
@@ -289,17 +211,10 @@ void vp9_find_mv_refs(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
split_count += (candidate_mi->mbmi.mode == SPLITMV);
}
}
- // Look in the last frame if it exists
- if (lf_here) {
- candidate_mi = lf_here;
- if (get_matching_candidate(candidate_mi, ref_frame, &c_refmv)) {
- add_candidate_mv(candidate_mvs, candidate_scores,
- &refmv_count, c_refmv, 18);
- }
- }
+
// More distant neigbours
for (i = 2; (i < MVREF_NEIGHBOURS) &&
- (refmv_count < (MAX_MV_REF_CANDIDATES - 1)); ++i) {
+ (refmv_count < MAX_MV_REF_CANDIDATES); ++i) {
const int mb_search_col = mb_col + mv_ref_search[i][0];
if ((mb_search_col >= cm->cur_tile_mb_col_start) &&
@@ -315,45 +230,49 @@ void vp9_find_mv_refs(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
}
}
+ // Look in the last frame if it exists
+ if (lf_here && (refmv_count < MAX_MV_REF_CANDIDATES)) {
+ candidate_mi = lf_here;
+ if (get_matching_candidate(candidate_mi, ref_frame, &c_refmv)) {
+ add_candidate_mv(candidate_mvs, candidate_scores,
+ &refmv_count, c_refmv, 17);
+ }
+ }
+
// If we have not found enough candidates consider ones where the
// reference frame does not match. Break out when we have
// MAX_MV_REF_CANDIDATES candidates.
// Look first at spatial neighbours
- if (refmv_count < (MAX_MV_REF_CANDIDATES - 1)) {
- for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
- const int mb_search_col = mb_col + mv_ref_search[i][0];
-
- if ((mb_search_col >= cm->cur_tile_mb_col_start) &&
- (mb_search_col < cm->cur_tile_mb_col_end) &&
- ((mv_ref_search[i][1] << 7) >= xd->mb_to_top_edge)) {
-
- candidate_mi = here + mv_ref_search[i][0] +
- (mv_ref_search[i][1] * xd->mode_info_stride);
-
- get_non_matching_candidates(candidate_mi, ref_frame,
- &c_ref_frame, &c_refmv,
- &c2_ref_frame, &c2_refmv);
-
- if (c_ref_frame != INTRA_FRAME) {
- scale_mv(xd, ref_frame, c_ref_frame, &c_refmv, ref_sign_bias);
- add_candidate_mv(candidate_mvs, candidate_scores,
- &refmv_count, c_refmv, ref_distance_weight[i]);
- }
-
- if (c2_ref_frame != INTRA_FRAME) {
- scale_mv(xd, ref_frame, c2_ref_frame, &c2_refmv, ref_sign_bias);
- add_candidate_mv(candidate_mvs, candidate_scores,
- &refmv_count, c2_refmv, ref_distance_weight[i]);
- }
+ for (i = 0; (i < MVREF_NEIGHBOURS) &&
+ (refmv_count < MAX_MV_REF_CANDIDATES); ++i) {
+ const int mb_search_col = mb_col + mv_ref_search[i][0];
+
+ if ((mb_search_col >= cm->cur_tile_mb_col_start) &&
+ (mb_search_col < cm->cur_tile_mb_col_end) &&
+ ((mv_ref_search[i][1] << 7) >= xd->mb_to_top_edge)) {
+ candidate_mi = here + mv_ref_search[i][0] +
+ (mv_ref_search[i][1] * xd->mode_info_stride);
+
+ get_non_matching_candidates(candidate_mi, ref_frame,
+ &c_ref_frame, &c_refmv,
+ &c2_ref_frame, &c2_refmv);
+
+ if (c_ref_frame != INTRA_FRAME) {
+ scale_mv(xd, ref_frame, c_ref_frame, &c_refmv, ref_sign_bias);
+ add_candidate_mv(candidate_mvs, candidate_scores,
+ &refmv_count, c_refmv, ref_distance_weight[i]);
}
- if (refmv_count >= (MAX_MV_REF_CANDIDATES - 1)) {
- break;
+ if (c2_ref_frame != INTRA_FRAME) {
+ scale_mv(xd, ref_frame, c2_ref_frame, &c2_refmv, ref_sign_bias);
+ add_candidate_mv(candidate_mvs, candidate_scores,
+ &refmv_count, c2_refmv, ref_distance_weight[i]);
}
}
}
+
// Look at the last frame if it exists
- if (refmv_count < (MAX_MV_REF_CANDIDATES - 1) && lf_here) {
+ if (lf_here && (refmv_count < MAX_MV_REF_CANDIDATES)) {
candidate_mi = lf_here;
get_non_matching_candidates(candidate_mi, ref_frame,
&c_ref_frame, &c_refmv,
@@ -362,13 +281,13 @@ void vp9_find_mv_refs(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
if (c_ref_frame != INTRA_FRAME) {
scale_mv(xd, ref_frame, c_ref_frame, &c_refmv, ref_sign_bias);
add_candidate_mv(candidate_mvs, candidate_scores,
- &refmv_count, c_refmv, 2);
+ &refmv_count, c_refmv, 1);
}
if (c2_ref_frame != INTRA_FRAME) {
scale_mv(xd, ref_frame, c2_ref_frame, &c2_refmv, ref_sign_bias);
add_candidate_mv(candidate_mvs, candidate_scores,
- &refmv_count, c2_refmv, 2);
+ &refmv_count, c2_refmv, 1);
}
}
@@ -394,15 +313,8 @@ void vp9_find_mv_refs(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
// Scan for 0,0 case and clamp non zero choices
for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) {
- if (candidate_mvs[i].as_int == 0) {
- zero_seen = 1;
- } else {
- clamp_mv_ref(xd, &candidate_mvs[i]);
- }
+ clamp_mv_ref(xd, &candidate_mvs[i]);
}
- // 0,0 is always a valid reference. Add it if not already seen.
- if (!zero_seen)
- candidate_mvs[MAX_MV_REF_CANDIDATES-1].as_int = 0;
// Copy over the candidate list.
vpx_memcpy(mv_ref_list, candidate_mvs, sizeof(candidate_mvs));
diff --git a/vp9/common/x86/vp9_asm_stubs.c b/vp9/common/x86/vp9_asm_stubs.c
index 310f8ed24..2b66834a7 100644
--- a/vp9/common/x86/vp9_asm_stubs.c
+++ b/vp9/common/x86/vp9_asm_stubs.c
@@ -278,43 +278,20 @@ void vp9_convolve8_ssse3(const uint8_t *src, int src_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
- DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 16*71);
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64*71);
+ assert(w <= 64);
assert(h <= 64);
-
- if (x_step_q4 == 16 && y_step_q4 == 16 &&
- filter_x[3] != 128 && filter_y[3] != 128) {
- if (w == 16) {
- vp9_filter_block1d16_h8_ssse3(src - 3 * src_stride, src_stride,
- fdata2, 16,
- h + 7, filter_x);
- vp9_filter_block1d16_v8_ssse3(fdata2, 16,
- dst, dst_stride,
- h, filter_y);
- return;
- }
- if (w == 8) {
- vp9_filter_block1d8_h8_ssse3(src - 3 * src_stride, src_stride,
- fdata2, 16,
- h + 7, filter_x);
- vp9_filter_block1d8_v8_ssse3(fdata2, 16,
- dst, dst_stride,
- h, filter_y);
- return;
- }
- if (w == 4) {
- vp9_filter_block1d4_h8_ssse3(src - 3 * src_stride, src_stride,
- fdata2, 16,
- h + 7, filter_x);
- vp9_filter_block1d4_v8_ssse3(fdata2, 16,
- dst, dst_stride,
- h, filter_y);
- return;
- }
+ if (x_step_q4 == 16 && y_step_q4 == 16) {
+ vp9_convolve8_horiz_ssse3(src - 3 * src_stride, src_stride, fdata2, 64,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h + 7);
+ vp9_convolve8_vert_ssse3(fdata2 + 3 * 64, 64, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h);
+ } else {
+ vp9_convolve8_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h);
}
- vp9_convolve8_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h);
}
void vp9_convolve8_avg_ssse3(const uint8_t *src, int src_stride,
@@ -322,42 +299,20 @@ void vp9_convolve8_avg_ssse3(const uint8_t *src, int src_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
- DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 16*71);
+ DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64*71);
+ assert(w <= 64);
assert(h <= 64);
-
- if (x_step_q4 == 16 && y_step_q4 == 16 &&
- filter_x[3] != 128 && filter_y[3] != 128) {
- if (w == 16) {
- vp9_filter_block1d16_h8_ssse3(src - 3 * src_stride, src_stride,
- fdata2, 16,
- h + 7, filter_x);
- vp9_filter_block1d16_v8_avg_ssse3(fdata2, 16,
- dst, dst_stride,
- h, filter_y);
- return;
- }
- if (w == 8) {
- vp9_filter_block1d8_h8_ssse3(src - 3 * src_stride, src_stride,
- fdata2, 16,
- h + 7, filter_x);
- vp9_filter_block1d8_v8_avg_ssse3(fdata2, 16,
- dst, dst_stride,
- h, filter_y);
- return;
- }
- if (w == 4) {
- vp9_filter_block1d4_h8_ssse3(src - 3 * src_stride, src_stride,
- fdata2, 16,
- h + 7, filter_x);
- vp9_filter_block1d4_v8_avg_ssse3(fdata2, 16,
- dst, dst_stride,
- h, filter_y);
- return;
- }
+ if (x_step_q4 == 16 && y_step_q4 == 16) {
+ vp9_convolve8_horiz_ssse3(src - 3 * src_stride, src_stride, fdata2, 64,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h + 7);
+ vp9_convolve8_avg_vert_ssse3(fdata2 + 3 * 64, 64, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4,
+ w, h);
+ } else {
+ vp9_convolve8_avg_c(src, src_stride, dst, dst_stride,
+ filter_x, x_step_q4, filter_y, y_step_q4, w, h);
}
- vp9_convolve8_avg_c(src, src_stride, dst, dst_stride,
- filter_x, x_step_q4, filter_y, y_step_q4,
- w, h);
}
#endif
diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c
index 6478a8ebc..9b3cc03aa 100644
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -98,22 +98,6 @@ static int read_mb_segid_except(vp9_reader *r,
: (pred_seg_id >= 2 ? vp9_read(r, p[1]) : (pred_seg_id == 0));
}
-#if CONFIG_NEW_MVREF
-int vp9_read_mv_ref_id(vp9_reader *r, vp9_prob *ref_id_probs) {
- int ref_index = 0;
-
- if (vp9_read(r, ref_id_probs[0])) {
- ref_index++;
- if (vp9_read(r, ref_id_probs[1])) {
- ref_index++;
- if (vp9_read(r, ref_id_probs[2]))
- ref_index++;
- }
- }
- return ref_index;
-}
-#endif
-
extern const int vp9_i8x8_block[4];
static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
int mb_row, int mb_col,
@@ -829,29 +813,6 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
#endif
}
-#if CONFIG_NEW_MVREF
- // if ((mbmi->mode == NEWMV) || (mbmi->mode == SPLITMV))
- if (mbmi->mode == NEWMV) {
- int best_index;
- MV_REFERENCE_FRAME ref_frame = mbmi->ref_frame;
-
- // Encode the index of the choice.
- best_index =
- vp9_read_mv_ref_id(r, xd->mb_mv_ref_probs[ref_frame]);
-
- best_mv.as_int = mbmi->ref_mvs[ref_frame][best_index].as_int;
-
- if (mbmi->second_ref_frame > 0) {
- ref_frame = mbmi->second_ref_frame;
-
- // Encode the index of the choice.
- best_index =
- vp9_read_mv_ref_id(r, xd->mb_mv_ref_probs[ref_frame]);
- best_mv_second.as_int = mbmi->ref_mvs[ref_frame][best_index].as_int;
- }
- }
-#endif
-
mbmi->uv_mode = DC_PRED;
switch (mbmi->mode) {
case SPLITMV: {
diff --git a/vp9/decoder/vp9_decodframe.c b/vp9/decoder/vp9_decodframe.c
index 4af921872..f8ef6c030 100644
--- a/vp9/decoder/vp9_decodframe.c
+++ b/vp9/decoder/vp9_decodframe.c
@@ -86,31 +86,31 @@ static int inv_recenter_nonneg(int v, int m) {
return m - ((v + 1) >> 1);
}
-static int decode_uniform(BOOL_DECODER *br, int n) {
+static int decode_uniform(vp9_reader *r, int n) {
int v;
const int l = get_unsigned_bits(n);
const int m = (1 << l) - n;
if (!l)
return 0;
- v = vp9_read_literal(br, l - 1);
- return v < m ? v : (v << 1) - m + vp9_read_bit(br);
+ v = vp9_read_literal(r, l - 1);
+ return v < m ? v : (v << 1) - m + vp9_read_bit(r);
}
-static int decode_term_subexp(BOOL_DECODER *br, int k, int num_syms) {
+static int decode_term_subexp(vp9_reader *r, int k, int num_syms) {
int i = 0, mk = 0, word;
while (1) {
const int b = i ? k + i - 1 : k;
const int a = 1 << b;
if (num_syms <= mk + 3 * a) {
- word = decode_uniform(br, num_syms - mk) + mk;
+ word = decode_uniform(r, num_syms - mk) + mk;
break;
} else {
- if (vp9_read_bit(br)) {
+ if (vp9_read_bit(r)) {
i++;
mk += a;
} else {
- word = vp9_read_literal(br, b) + mk;
+ word = vp9_read_literal(r, b) + mk;
break;
}
}
@@ -118,11 +118,11 @@ static int decode_term_subexp(BOOL_DECODER *br, int k, int num_syms) {
return word;
}
-static int decode_unsigned_max(BOOL_DECODER *br, int max) {
+static int decode_unsigned_max(vp9_reader *r, int max) {
int data = 0, bit = 0, lmax = max;
while (lmax) {
- data |= vp9_read_bit(br) << bit++;
+ data |= vp9_read_bit(r) << bit++;
lmax >>= 1;
}
return data > max ? max : data;
@@ -154,8 +154,8 @@ static int inv_remap_prob(int v, int m) {
}
}
-static vp9_prob read_prob_diff_update(vp9_reader *const bc, int oldp) {
- int delp = decode_term_subexp(bc, SUBEXP_PARAM, 255);
+static vp9_prob read_prob_diff_update(vp9_reader *r, int oldp) {
+ int delp = decode_term_subexp(r, SUBEXP_PARAM, 255);
return (vp9_prob)inv_remap_prob(delp, oldp);
}
@@ -246,8 +246,7 @@ static void propagate_nzcs(VP9_COMMON *cm, MACROBLOCKD *xd) {
}
#endif
-static void decode_16x16(VP9D_COMP *pbi, MACROBLOCKD *xd,
- BOOL_DECODER* const bc) {
+static void decode_16x16(MACROBLOCKD *xd) {
const TX_TYPE tx_type = get_tx_type_16x16(xd, 0);
vp9_dequant_iht_add_16x16_c(tx_type, xd->plane[0].qcoeff,
@@ -263,8 +262,7 @@ static void decode_16x16(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->plane[2].eobs[0]);
}
-static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
- BOOL_DECODER* const bc) {
+static void decode_8x8(MACROBLOCKD *xd) {
const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
// luma
// if the first one is DCT_DCT assume all the rest are as well
@@ -347,8 +345,7 @@ static INLINE void dequant_add_y(MACROBLOCKD *xd, TX_TYPE tx_type, int idx) {
}
-static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
- BOOL_DECODER* const bc) {
+static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd, vp9_reader *r) {
TX_TYPE tx_type;
int i = 0;
const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
@@ -386,7 +383,7 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->mode_info_context->bmi[i].as_mode.context = b->bmi.as_mode.context =
vp9_find_bpred_context(xd, b);
if (!xd->mode_info_context->mbmi.mb_skip_coeff)
- vp9_decode_coefs_4x4(pbi, xd, bc, PLANE_TYPE_Y_WITH_DC, i);
+ vp9_decode_coefs_4x4(pbi, xd, r, PLANE_TYPE_Y_WITH_DC, i);
#endif
vp9_intra4x4_predict(xd, b, b_mode, *(b->base_dst) + b->dst,
b->dst_stride);
@@ -395,7 +392,7 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
}
#if CONFIG_NEWBINTRAMODES
if (!xd->mode_info_context->mbmi.mb_skip_coeff)
- vp9_decode_mb_tokens_4x4_uv(pbi, xd, bc);
+ vp9_decode_mb_tokens_4x4_uv(pbi, xd, r);
#endif
vp9_build_intra_predictors_sbuv_s(xd, BLOCK_SIZE_MB16X16);
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
@@ -612,7 +609,7 @@ static void decode_sb_16x16(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
}
static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col,
- BOOL_DECODER* const bc, BLOCK_SIZE_TYPE bsize) {
+ vp9_reader *r, BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize), bhl = mb_height_log2(bsize);
const int bw = 1 << bwl, bh = 1 << bhl;
int n, eobtotal;
@@ -644,7 +641,7 @@ static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col,
mb_init_dequantizer(pbi, xd);
// dequantization and idct
- eobtotal = vp9_decode_tokens(pbi, xd, bc, bsize);
+ eobtotal = vp9_decode_tokens(pbi, xd, r, bsize);
if (eobtotal == 0) { // skip loopfilter
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
@@ -682,7 +679,7 @@ static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col,
// couples special handles on I8x8, B_PRED, and splitmv modes.
static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
int mb_row, int mb_col,
- BOOL_DECODER* const bc) {
+ vp9_reader *r) {
int eobtotal = 0;
const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
const int tx_size = xd->mode_info_context->mbmi.txfm_size;
@@ -718,11 +715,11 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
if (xd->segmentation_enabled)
mb_init_dequantizer(pbi, xd);
- if (!bool_error(bc)) {
+ if (!bool_error(r)) {
#if CONFIG_NEWBINTRAMODES
if (mode != I4X4_PRED)
#endif
- eobtotal = vp9_decode_tokens(pbi, xd, bc, BLOCK_SIZE_MB16X16);
+ eobtotal = vp9_decode_tokens(pbi, xd, r, BLOCK_SIZE_MB16X16);
}
}
@@ -730,7 +727,7 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
mode != I4X4_PRED &&
mode != SPLITMV &&
mode != I8X8_PRED &&
- !bool_error(bc)) {
+ !bool_error(r)) {
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
} else {
#if 0 // def DEC_DEBUG
@@ -739,11 +736,11 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
#endif
if (tx_size == TX_16X16) {
- decode_16x16(pbi, xd, bc);
+ decode_16x16(xd);
} else if (tx_size == TX_8X8) {
- decode_8x8(pbi, xd, bc);
+ decode_8x8(xd);
} else {
- decode_4x4(pbi, xd, bc);
+ decode_4x4(pbi, xd, r);
}
}
@@ -988,7 +985,7 @@ static void init_frame(VP9D_COMP *pbi) {
#if CONFIG_CODE_NONZEROCOUNT
static void read_nzc_probs_common(VP9_COMMON *cm,
- BOOL_DECODER* const bc,
+ vp9_reader *rd,
TX_SIZE tx_size) {
int c, r, b, t;
int tokens, nodes;
@@ -996,7 +993,7 @@ static void read_nzc_probs_common(VP9_COMMON *cm,
vp9_prob upd;
if (!get_nzc_used(tx_size)) return;
- if (!vp9_read_bit(bc)) return;
+ if (!vp9_read_bit(rd)) return;
if (tx_size == TX_32X32) {
tokens = NZC32X32_TOKENS;
@@ -1023,8 +1020,8 @@ static void read_nzc_probs_common(VP9_COMMON *cm,
int offset_nodes = offset * nodes;
for (t = 0; t < nodes; ++t) {
vp9_prob *p = &nzc_probs[offset_nodes + t];
- if (vp9_read(bc, upd)) {
- *p = read_prob_diff_update(bc, *p);
+ if (vp9_read(rd, upd)) {
+ *p = read_prob_diff_update(rd, *p);
}
}
}
@@ -1032,13 +1029,13 @@ static void read_nzc_probs_common(VP9_COMMON *cm,
}
}
-static void read_nzc_pcat_probs(VP9_COMMON *cm, BOOL_DECODER* const bc) {
+static void read_nzc_pcat_probs(VP9_COMMON *cm, vp9_reader *r) {
int c, t, b;
vp9_prob upd = NZC_UPDATE_PROB_PCAT;
if (!(get_nzc_used(TX_4X4) || get_nzc_used(TX_8X8) ||
get_nzc_used(TX_16X16) || get_nzc_used(TX_32X32)))
return;
- if (!vp9_read_bit(bc)) {
+ if (!vp9_read_bit(r)) {
return;
}
for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
@@ -1046,31 +1043,30 @@ static void read_nzc_pcat_probs(VP9_COMMON *cm, BOOL_DECODER* const bc) {
int bits = vp9_extranzcbits[t + NZC_TOKENS_NOEXTRA];
for (b = 0; b < bits; ++b) {
vp9_prob *p = &cm->fc.nzc_pcat_probs[c][t][b];
- if (vp9_read(bc, upd)) {
- *p = read_prob_diff_update(bc, *p);
+ if (vp9_read(r, upd)) {
+ *p = read_prob_diff_update(r, *p);
}
}
}
}
}
-static void read_nzc_probs(VP9_COMMON *cm,
- BOOL_DECODER* const bc) {
- read_nzc_probs_common(cm, bc, TX_4X4);
+static void read_nzc_probs(VP9_COMMON *cm, vp9_reader *r) {
+ read_nzc_probs_common(cm, r, TX_4X4);
if (cm->txfm_mode != ONLY_4X4)
- read_nzc_probs_common(cm, bc, TX_8X8);
+ read_nzc_probs_common(cm, r, TX_8X8);
if (cm->txfm_mode > ALLOW_8X8)
- read_nzc_probs_common(cm, bc, TX_16X16);
+ read_nzc_probs_common(cm, r, TX_16X16);
if (cm->txfm_mode > ALLOW_16X16)
- read_nzc_probs_common(cm, bc, TX_32X32);
+ read_nzc_probs_common(cm, r, TX_32X32);
#ifdef NZC_PCAT_UPDATE
- read_nzc_pcat_probs(cm, bc);
+ read_nzc_pcat_probs(cm, r);
#endif
}
#endif // CONFIG_CODE_NONZEROCOUNT
static void read_coef_probs_common(VP9D_COMP *pbi,
- BOOL_DECODER* const bc,
+ vp9_reader *r,
vp9_coeff_probs *coef_probs,
TX_SIZE tx_size) {
#if CONFIG_MODELCOEFPROB && MODEL_BASED_UPDATE
@@ -1081,7 +1077,7 @@ static void read_coef_probs_common(VP9D_COMP *pbi,
int i, j, k, l, m;
- if (vp9_read_bit(bc)) {
+ if (vp9_read_bit(r)) {
for (i = 0; i < BLOCK_TYPES; i++) {
for (j = 0; j < REF_TYPES; j++) {
for (k = 0; k < COEF_BANDS; k++) {
@@ -1097,8 +1093,8 @@ static void read_coef_probs_common(VP9D_COMP *pbi,
for (m = mstart; m < entropy_nodes_update; m++) {
vp9_prob *const p = coef_probs[i][j][k][l] + m;
- if (vp9_read(bc, vp9_coef_update_prob[m])) {
- *p = read_prob_diff_update(bc, *p);
+ if (vp9_read(r, vp9_coef_update_prob[m])) {
+ *p = read_prob_diff_update(r, *p);
#if CONFIG_MODELCOEFPROB && MODEL_BASED_UPDATE
if (m == UNCONSTRAINED_NODES - 1)
vp9_get_model_distribution(*p, coef_probs[i][j][k][l], i, j);
@@ -1112,19 +1108,20 @@ static void read_coef_probs_common(VP9D_COMP *pbi,
}
}
-static void read_coef_probs(VP9D_COMP *pbi, BOOL_DECODER* const bc) {
- VP9_COMMON *const pc = &pbi->common;
+static void read_coef_probs(VP9D_COMP *pbi, vp9_reader *r) {
+ const TXFM_MODE mode = pbi->common.txfm_mode;
+ FRAME_CONTEXT *const fc = &pbi->common.fc;
- read_coef_probs_common(pbi, bc, pc->fc.coef_probs_4x4, TX_4X4);
+ read_coef_probs_common(pbi, r, fc->coef_probs_4x4, TX_4X4);
- if (pbi->common.txfm_mode != ONLY_4X4)
- read_coef_probs_common(pbi, bc, pc->fc.coef_probs_8x8, TX_8X8);
+ if (mode != ONLY_4X4)
+ read_coef_probs_common(pbi, r, fc->coef_probs_8x8, TX_8X8);
- if (pbi->common.txfm_mode > ALLOW_8X8)
- read_coef_probs_common(pbi, bc, pc->fc.coef_probs_16x16, TX_16X16);
+ if (mode > ALLOW_8X8)
+ read_coef_probs_common(pbi, r, fc->coef_probs_16x16, TX_16X16);
- if (pbi->common.txfm_mode > ALLOW_16X16)
- read_coef_probs_common(pbi, bc, pc->fc.coef_probs_32x32, TX_32X32);
+ if (mode > ALLOW_16X16)
+ read_coef_probs_common(pbi, r, fc->coef_probs_32x32, TX_32X32);
}
static void update_frame_size(VP9D_COMP *pbi) {
@@ -1343,7 +1340,7 @@ static const uint8_t *setup_frame_size(VP9D_COMP *pbi, int scaling_active,
return data;
}
-static void update_frame_context(VP9D_COMP *pbi, vp9_reader *r) {
+static void update_frame_context(VP9D_COMP *pbi) {
FRAME_CONTEXT *const fc = &pbi->common.fc;
vp9_copy(fc->pre_coef_probs_4x4, fc->coef_probs_4x4);
@@ -1394,16 +1391,11 @@ static void update_frame_context(VP9D_COMP *pbi, vp9_reader *r) {
vp9_zero(fc->nzc_counts_32x32);
vp9_zero(fc->nzc_pcat_counts);
#endif
-
- read_coef_probs(pbi, r);
-#if CONFIG_CODE_NONZEROCOUNT
- read_nzc_probs(&pbi->common, r);
-#endif
}
static void decode_tiles(VP9D_COMP *pbi,
const uint8_t *data, int first_partition_size,
- BOOL_DECODER *header_bc, BOOL_DECODER *residual_bc) {
+ vp9_reader *header_bc, vp9_reader *residual_bc) {
VP9_COMMON *const pc = &pbi->common;
const uint8_t *data_ptr = data + first_partition_size;
@@ -1429,7 +1421,7 @@ static void decode_tiles(VP9D_COMP *pbi,
if (pbi->oxcf.inv_tile_order) {
const int n_cols = pc->tile_columns;
const uint8_t *data_ptr2[4][1 << 6];
- BOOL_DECODER UNINITIALIZED_IS_SAFE(bc_bak);
+ vp9_reader UNINITIALIZED_IS_SAFE(bc_bak);
// pre-initialize the offsets, we're going to read in inverse order
data_ptr2[0][0] = data_ptr;
@@ -1483,7 +1475,7 @@ static void decode_tiles(VP9D_COMP *pbi,
}
int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
- BOOL_DECODER header_bc, residual_bc;
+ vp9_reader header_bc, residual_bc;
VP9_COMMON *const pc = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
const uint8_t *data = pbi->source;
@@ -1639,25 +1631,6 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
}
#endif
-#if CONFIG_NEW_MVREF
- // If Key frame reset mv ref id probabilities to defaults
- if (pc->frame_type != KEY_FRAME) {
- // Read any mv_ref index probability updates
- int i, j;
-
- for (i = 0; i < MAX_REF_FRAMES; ++i) {
- // Skip the dummy entry for intra ref frame.
- if (i == INTRA_FRAME)
- continue;
-
- // Read any updates to probabilities
- for (j = 0; j < MAX_MV_REF_CANDIDATES - 1; ++j)
- if (vp9_read(&header_bc, VP9_MVREF_UPDATE_PROB))
- xd->mb_mv_ref_probs[i][j] = vp9_read_prob(&header_bc);
- }
- }
-#endif
-
if (0) {
FILE *z = fopen("decodestats.stt", "a");
fprintf(z, "%6d F:%d,R:%d,Q:%d\n",
@@ -1668,7 +1641,12 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
fclose(z);
}
- update_frame_context(pbi, &header_bc);
+ update_frame_context(pbi);
+
+ read_coef_probs(pbi, &header_bc);
+#if CONFIG_CODE_NONZEROCOUNT
+ read_nzc_probs(&pbi->common, &header_bc);
+#endif
// Initialize xd pointers. Any reference should do for xd->pre, so use 0.
vpx_memcpy(&xd->pre, &pc->yv12_fb[pc->active_ref_idx[0]],
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index bcfbd6094..d4ccf8ca2 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -277,55 +277,6 @@ static void update_inter_mode_probs(VP9_COMMON *cm,
}
}
-#if CONFIG_NEW_MVREF
-static void update_mv_ref_probs(VP9_COMP *cpi,
- int mvref_probs[MAX_REF_FRAMES]
- [MAX_MV_REF_CANDIDATES-1]) {
- MACROBLOCKD *xd = &cpi->mb.e_mbd;
- int rf; // Reference frame
- int ref_c; // Motion reference candidate
- int node; // Probability node index
-
- for (rf = 0; rf < MAX_REF_FRAMES; ++rf) {
- int count = 0;
-
- // Skip the dummy entry for intra ref frame.
- if (rf == INTRA_FRAME) {
- continue;
- }
-
- // Sum the counts for all candidates
- for (ref_c = 0; ref_c < MAX_MV_REF_CANDIDATES; ++ref_c) {
- count += cpi->mb_mv_ref_count[rf][ref_c];
- }
-
- // Calculate the tree node probabilities
- for (node = 0; node < MAX_MV_REF_CANDIDATES-1; ++node) {
- int new_prob, old_cost, new_cost;
- unsigned int branch_cnts[2];
-
- // How many hits on each branch at this node
- branch_cnts[0] = cpi->mb_mv_ref_count[rf][node];
- branch_cnts[1] = count - cpi->mb_mv_ref_count[rf][node];
-
- // Work out cost of coding branches with the old and optimal probability
- old_cost = cost_branch256(branch_cnts, xd->mb_mv_ref_probs[rf][node]);
- new_prob = get_prob(branch_cnts[0], count);
- new_cost = cost_branch256(branch_cnts, new_prob);
-
- // Take current 0 branch cases out of residual count
- count -= cpi->mb_mv_ref_count[rf][node];
-
- if ((new_cost + VP9_MV_REF_UPDATE_COST) <= old_cost) {
- mvref_probs[rf][node] = new_prob;
- } else {
- mvref_probs[rf][node] = xd->mb_mv_ref_probs[rf][node];
- }
- }
- }
-}
-#endif
-
static void write_ymode(vp9_writer *bc, int m, const vp9_prob *p) {
write_token(bc, vp9_ymode_tree, p, vp9_ymode_encodings + m);
}
@@ -469,7 +420,7 @@ static void pack_mb_tokens(vp9_writer* const bc,
TOKENEXTRA *p = *tp;
while (p < stop) {
- const int t = p->Token;
+ const int t = p->token;
const struct vp9_token *const a = vp9_coef_encodings + t;
const vp9_extra_bit_struct *const b = vp9_extra_bits + t;
int i = 0;
@@ -497,7 +448,7 @@ static void pack_mb_tokens(vp9_writer* const bc,
if (b->base_val) {
- const int e = p->Extra, L = b->Len;
+ const int e = p->extra, L = b->Len;
if (L) {
const unsigned char *pp = b->prob;
@@ -562,38 +513,6 @@ static void write_nmv(VP9_COMP *cpi, vp9_writer *bc,
vp9_encode_nmv_fp(bc, &e, &ref->as_mv, nmvc, usehp);
}
-#if CONFIG_NEW_MVREF
-static void vp9_write_mv_ref_id(vp9_writer *w,
- vp9_prob * ref_id_probs,
- int mv_ref_id) {
- // Encode the index for the MV reference.
- switch (mv_ref_id) {
- case 0:
- vp9_write(w, 0, ref_id_probs[0]);
- break;
- case 1:
- vp9_write(w, 1, ref_id_probs[0]);
- vp9_write(w, 0, ref_id_probs[1]);
- break;
- case 2:
- vp9_write(w, 1, ref_id_probs[0]);
- vp9_write(w, 1, ref_id_probs[1]);
- vp9_write(w, 0, ref_id_probs[2]);
- break;
- case 3:
- vp9_write(w, 1, ref_id_probs[0]);
- vp9_write(w, 1, ref_id_probs[1]);
- vp9_write(w, 1, ref_id_probs[2]);
- break;
-
- // TRAP.. This should not happen
- default:
- assert(0);
- break;
- }
-}
-#endif
-
 // This function writes the current macro block's segment id to the bitstream
// It should only be called if a segment map update is indicated.
static void write_mb_segid(vp9_writer *bc,
@@ -901,22 +820,6 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
}
#endif
-#if CONFIG_NEW_MVREF
- // if ((mode == NEWMV) || (mode == SPLITMV)) {
- if (mode == NEWMV) {
- // Encode the index of the choice.
- vp9_write_mv_ref_id(bc,
- xd->mb_mv_ref_probs[rf], mi->best_index);
-
- if (mi->second_ref_frame > 0) {
- // Encode the index of the choice.
- vp9_write_mv_ref_id(
- bc, xd->mb_mv_ref_probs[mi->second_ref_frame],
- mi->best_second_index);
- }
- }
-#endif
-
switch (mode) { /* new, split require MVs */
case NEWMV:
#ifdef ENTROPY_STATS
@@ -2757,37 +2660,6 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
}
}
-#if CONFIG_NEW_MVREF
- if ((pc->frame_type != KEY_FRAME)) {
- int new_mvref_probs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES-1];
- int i, j;
-
- update_mv_ref_probs(cpi, new_mvref_probs);
-
- for (i = 0; i < MAX_REF_FRAMES; ++i) {
- // Skip the dummy entry for intra ref frame.
- if (i == INTRA_FRAME) {
- continue;
- }
-
- // Encode any mandated updates to probabilities
- for (j = 0; j < MAX_MV_REF_CANDIDATES - 1; ++j) {
- if (new_mvref_probs[i][j] != xd->mb_mv_ref_probs[i][j]) {
- vp9_write(&header_bc, 1, VP9_MVREF_UPDATE_PROB);
- vp9_write_prob(&header_bc, new_mvref_probs[i][j]);
-
- // Only update the persistent copy if this is the "real pack"
- if (!cpi->dummy_packing) {
- xd->mb_mv_ref_probs[i][j] = new_mvref_probs[i][j];
- }
- } else {
- vp9_write(&header_bc, 0, VP9_MVREF_UPDATE_PROB);
- }
- }
- }
- }
-#endif
-
vp9_clear_system_state(); // __asm emms;
vp9_copy(cpi->common.fc.pre_coef_probs_4x4,
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 5f29c2770..b12652f34 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -345,87 +345,6 @@ void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
adjust_act_zbin(cpi, x);
}
-#if CONFIG_NEW_MVREF
-static int vp9_cost_mv_ref_id(vp9_prob * ref_id_probs, int mv_ref_id) {
- int cost;
-
- // Encode the index for the MV reference.
- switch (mv_ref_id) {
- case 0:
- cost = vp9_cost_zero(ref_id_probs[0]);
- break;
- case 1:
- cost = vp9_cost_one(ref_id_probs[0]);
- cost += vp9_cost_zero(ref_id_probs[1]);
- break;
- case 2:
- cost = vp9_cost_one(ref_id_probs[0]);
- cost += vp9_cost_one(ref_id_probs[1]);
- cost += vp9_cost_zero(ref_id_probs[2]);
- break;
- case 3:
- cost = vp9_cost_one(ref_id_probs[0]);
- cost += vp9_cost_one(ref_id_probs[1]);
- cost += vp9_cost_one(ref_id_probs[2]);
- break;
-
- // TRAP.. This should not happen
- default:
- assert(0);
- break;
- }
- return cost;
-}
-
-// Estimate the cost of each coding the vector using each reference candidate
-static unsigned int pick_best_mv_ref(MACROBLOCK *x,
- MV_REFERENCE_FRAME ref_frame,
- int_mv target_mv,
- int_mv * mv_ref_list,
- int_mv * best_ref) {
- int i;
- int best_index = 0;
- int cost, cost2;
- int zero_seen = (mv_ref_list[0].as_int) ? 0 : 1;
- MACROBLOCKD *xd = &x->e_mbd;
- int max_mv = MV_MAX;
-
- cost = vp9_cost_mv_ref_id(xd->mb_mv_ref_probs[ref_frame], 0) +
- vp9_mv_bit_cost(&target_mv, &mv_ref_list[0], x->nmvjointcost,
- x->mvcost, 96, xd->allow_high_precision_mv);
-
- for (i = 1; i < MAX_MV_REF_CANDIDATES; ++i) {
- // If we see a 0,0 reference vector for a second time we have reached
- // the end of the list of valid candidate vectors.
- if (!mv_ref_list[i].as_int) {
- if (zero_seen)
- break;
- else
- zero_seen = 1;
- }
-
- // Check for cases where the reference choice would give rise to an
- // uncodable/out of range residual for row or col.
- if ((abs(target_mv.as_mv.row - mv_ref_list[i].as_mv.row) > max_mv) ||
- (abs(target_mv.as_mv.col - mv_ref_list[i].as_mv.col) > max_mv)) {
- continue;
- }
-
- cost2 = vp9_cost_mv_ref_id(xd->mb_mv_ref_probs[ref_frame], i) +
- vp9_mv_bit_cost(&target_mv, &mv_ref_list[i], x->nmvjointcost,
- x->mvcost, 96, xd->allow_high_precision_mv);
-
- if (cost2 < cost) {
- cost = cost2;
- best_index = i;
- }
- }
- best_ref->as_int = mv_ref_list[best_index].as_int;
-
- return best_index;
-}
-#endif
-
static void update_state(VP9_COMP *cpi,
PICK_MODE_CONTEXT *ctx,
BLOCK_SIZE_TYPE bsize,
@@ -561,31 +480,11 @@ static void update_state(VP9_COMP *cpi,
if (mbmi->mode == SPLITMV || mbmi->mode == NEWMV) {
int_mv best_mv, best_second_mv;
MV_REFERENCE_FRAME rf = mbmi->ref_frame;
-#if CONFIG_NEW_MVREF
- unsigned int best_index;
- MV_REFERENCE_FRAME sec_ref_frame = mbmi->second_ref_frame;
-#endif
best_mv.as_int = ctx->best_ref_mv.as_int;
best_second_mv.as_int = ctx->second_best_ref_mv.as_int;
if (mbmi->mode == NEWMV) {
best_mv.as_int = mbmi->ref_mvs[rf][0].as_int;
best_second_mv.as_int = mbmi->ref_mvs[mbmi->second_ref_frame][0].as_int;
-#if CONFIG_NEW_MVREF
- best_index = pick_best_mv_ref(x, rf, mbmi->mv[0],
- mbmi->ref_mvs[rf], &best_mv);
- mbmi->best_index = best_index;
- ++cpi->mb_mv_ref_count[rf][best_index];
-
- if (mbmi->second_ref_frame > 0) {
- unsigned int best_index;
- best_index =
- pick_best_mv_ref(x, sec_ref_frame, mbmi->mv[1],
- mbmi->ref_mvs[sec_ref_frame],
- &best_second_mv);
- mbmi->best_second_index = best_index;
- ++cpi->mb_mv_ref_count[sec_ref_frame][best_index];
- }
-#endif
}
mbmi->best_mv.as_int = best_mv.as_int;
mbmi->best_second_mv.as_int = best_second_mv.as_int;
@@ -870,7 +769,7 @@ static void encode_sb(VP9_COMP *cpi,
update_stats(cpi, mb_row, mb_col);
cpi->partition_count[partition_plane(bsize)][PARTITION_NONE]++;
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
#if CONFIG_SBSEGMENT
@@ -888,7 +787,7 @@ static void encode_sb(VP9_COMP *cpi,
if (output_enabled) {
update_stats(cpi, mb_row, mb_col + i);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
}
@@ -906,7 +805,7 @@ static void encode_sb(VP9_COMP *cpi,
if (output_enabled) {
update_stats(cpi, mb_row + i, mb_col);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
}
@@ -937,7 +836,7 @@ static void encode_sb(VP9_COMP *cpi,
if (output_enabled) {
update_stats(cpi, mb_row + y_idx, mb_col + x_idx);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
}
@@ -970,7 +869,7 @@ static void encode_sb64(VP9_COMP *cpi,
1, mb_row, mb_col, bsize);
update_stats(cpi, mb_row, mb_col);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
cpi->partition_count[partition_plane(bsize)][PARTITION_NONE]++;
#if CONFIG_SBSEGMENT
@@ -985,7 +884,7 @@ static void encode_sb64(VP9_COMP *cpi,
1, mb_row, mb_col + i * 2, BLOCK_SIZE_SB32X64);
update_stats(cpi, mb_row, mb_col + i * 2);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
} else if (is_sb[0] == BLOCK_SIZE_SB64X32) {
@@ -999,7 +898,7 @@ static void encode_sb64(VP9_COMP *cpi,
1, mb_row + i * 2, mb_col, BLOCK_SIZE_SB64X32);
update_stats(cpi, mb_row + i * 2, mb_col);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
#endif
@@ -1451,9 +1350,6 @@ static void encode_frame_internal(VP9_COMP *cpi) {
vp9_zero(cm->fc.nzc_counts_32x32);
vp9_zero(cm->fc.nzc_pcat_counts);
#endif
-#if CONFIG_NEW_MVREF
- vp9_zero(cpi->mb_mv_ref_count);
-#endif
cpi->mb.e_mbd.lossless = (cm->base_qindex == 0 &&
cm->y_dc_delta_q == 0 &&
@@ -2342,8 +2238,6 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
}
}
-void __attribute__((noinline)) hi(void) { }
-
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
int output_enabled, int mb_row, int mb_col,
BLOCK_SIZE_TYPE bsize) {
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index dbbde31d2..a4478dd6d 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -429,7 +429,7 @@ static void optimize_b(VP9_COMMON *const cm,
*(tokens[eob] + 1) = *(tokens[eob] + 0);
next = eob;
for (i = 0; i < eob; i++)
- token_cache[i] = vp9_dct_value_tokens_ptr[qcoeff_ptr[scan[i]]].Token;
+ token_cache[i] = vp9_dct_value_tokens_ptr[qcoeff_ptr[scan[i]]].token;
nb = vp9_get_coef_neighbors_handle(scan, &pad);
for (i = eob; i-- > i0;) {
@@ -448,7 +448,7 @@ static void optimize_b(VP9_COMMON *const cm,
/* Evaluate the first possibility for this state. */
rate0 = tokens[next][0].rate;
rate1 = tokens[next][1].rate;
- t0 = (vp9_dct_value_tokens_ptr + x)->Token;
+ t0 = (vp9_dct_value_tokens_ptr + x)->token;
/* Consider both possible successor states. */
if (next < default_eob) {
band = get_coef_band(scan, tx_size, i + 1);
@@ -509,7 +509,7 @@ static void optimize_b(VP9_COMMON *const cm,
}
#endif
} else {
- t0 = t1 = (vp9_dct_value_tokens_ptr + x)->Token;
+ t0 = t1 = (vp9_dct_value_tokens_ptr + x)->token;
}
if (next < default_eob) {
band = get_coef_band(scan, tx_size, i + 1);
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index ae263caf7..f2e004608 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -38,7 +38,7 @@
#define IIFACTOR 12.5
#define IIKFACTOR1 12.5
#define IIKFACTOR2 15.0
-#define RMAX 128.0
+#define RMAX 512.0
#define GF_RMAX 96.0
#define ERR_DIVISOR 150.0
#define MIN_DECAY_FACTOR 0.1
@@ -2352,7 +2352,6 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
double decay_accumulator = 1.0;
double zero_motion_accumulator = 1.0;
double boost_score = 0;
- double old_boost_score = 0.0;
double loop_decay_rate;
double kf_mod_err = 0.0;
@@ -2524,22 +2523,13 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
boost_score = 0.0;
loop_decay_rate = 1.00; // Starting decay rate
+ // Scan through the kf group collating various stats.
for (i = 0; i < cpi->twopass.frames_to_key; i++) {
double r;
if (EOF == input_stats(cpi, &next_frame))
break;
- if (next_frame.intra_error > cpi->twopass.kf_intra_err_min)
- r = (IIKFACTOR2 * next_frame.intra_error /
- DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
- else
- r = (IIKFACTOR2 * cpi->twopass.kf_intra_err_min /
- DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
-
- if (r > RMAX)
- r = RMAX;
-
// Monitor for static sections.
if ((next_frame.pcnt_inter - next_frame.pcnt_motion) <
zero_motion_accumulator) {
@@ -2547,22 +2537,28 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
(next_frame.pcnt_inter - next_frame.pcnt_motion);
}
- // How fast is prediction quality decaying
- if (!detect_flash(cpi, 0)) {
- loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
- decay_accumulator = decay_accumulator * loop_decay_rate;
- decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
- ? MIN_DECAY_FACTOR : decay_accumulator;
- }
+ // For the first few frames collect data to decide kf boost.
+ if (i <= (cpi->max_gf_interval * 2)) {
+ if (next_frame.intra_error > cpi->twopass.kf_intra_err_min)
+ r = (IIKFACTOR2 * next_frame.intra_error /
+ DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
+ else
+ r = (IIKFACTOR2 * cpi->twopass.kf_intra_err_min /
+ DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
- boost_score += (decay_accumulator * r);
+ if (r > RMAX)
+ r = RMAX;
- if ((i > MIN_GF_INTERVAL) &&
- ((boost_score - old_boost_score) < 6.25)) {
- break;
- }
+ // How fast is prediction quality decaying
+ if (!detect_flash(cpi, 0)) {
+ loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
+ decay_accumulator = decay_accumulator * loop_decay_rate;
+ decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
+ ? MIN_DECAY_FACTOR : decay_accumulator;
+ }
- old_boost_score = boost_score;
+ boost_score += (decay_accumulator * r);
+ }
}
{
@@ -2592,8 +2588,8 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
int allocation_chunks;
int alt_kf_bits;
- if (kf_boost < (cpi->twopass.frames_to_key * 5))
- kf_boost = (cpi->twopass.frames_to_key * 5);
+ if (kf_boost < (cpi->twopass.frames_to_key * 3))
+ kf_boost = (cpi->twopass.frames_to_key * 3);
if (kf_boost < 300) // Min KF boost
kf_boost = 300;
diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c
index caba2ea85..17dafe6d7 100644
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -56,9 +56,9 @@ int vp9_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvjcost, int *mvcost[2],
MV v;
v.row = mv->as_mv.row - ref->as_mv.row;
v.col = mv->as_mv.col - ref->as_mv.col;
- return ((mvjcost[vp9_get_mv_joint(&v)] +
- mvcost[0][v.row] +
- mvcost[1][v.col]) * weight) >> 7;
+ return ROUND_POWER_OF_TWO((mvjcost[vp9_get_mv_joint(&v)] +
+ mvcost[0][v.row] +
+ mvcost[1][v.col]) * weight, 7);
}
static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvjcost, int *mvcost[2],
diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c
index 9997e188c..85ac5231d 100644
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -177,15 +177,16 @@ static void init_minq_luts(void) {
kf_low_motion_minq[i] = calculate_minq_index(maxq,
- 0.0000003,
- -0.000015,
- 0.074,
+ 0.000001,
+ -0.0004,
+ 0.15,
0.0);
kf_high_motion_minq[i] = calculate_minq_index(maxq,
- 0.0000004,
- -0.000125,
- 0.14,
+ 0.000002,
+ -0.0012,
+ 0.5,
0.0);
+
gf_low_motion_minq[i] = calculate_minq_index(maxq,
0.0000015,
-0.0009,
@@ -2757,31 +2758,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
q = cpi->active_worst_quality;
if (cm->frame_type == KEY_FRAME) {
-#if CONFIG_MULTIPLE_ARF
- double current_q;
-#endif
- int high = 2000;
- int low = 400;
-
- if (cpi->kf_boost > high) {
- cpi->active_best_quality = kf_low_motion_minq[q];
- } else if (cpi->kf_boost < low) {
- cpi->active_best_quality = kf_high_motion_minq[q];
- } else {
- const int gap = high - low;
- const int offset = high - cpi->kf_boost;
- const int qdiff = kf_high_motion_minq[q] - kf_low_motion_minq[q];
- const int adjustment = ((offset * qdiff) + (gap >> 1)) / gap;
-
- cpi->active_best_quality = kf_low_motion_minq[q] + adjustment;
- }
-
- // Make an adjustment based on the % static
- // The main impact of this is at lower Q to prevent overly large key
- // frames unless a lot of the image is static.
- if (cpi->kf_zeromotion_pct < 64)
- cpi->active_best_quality += 4 - (cpi->kf_zeromotion_pct >> 4);
-
+#if !CONFIG_MULTIPLE_ARF
// Special case for key frames forced because we have reached
// the maximum key frame interval. Here force the Q to a range
// based on the ambient Q to reduce the risk of popping
@@ -2794,8 +2771,43 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
(last_boosted_q * 0.75));
cpi->active_best_quality = MAX(qindex + delta_qindex, cpi->best_quality);
+ } else {
+ int high = 5000;
+ int low = 400;
+ double q_adj_factor = 1.0;
+ double q_val;
+
+ // Baseline value derived from cpi->active_worst_quality and kf boost
+ if (cpi->kf_boost > high) {
+ cpi->active_best_quality = kf_low_motion_minq[q];
+ } else if (cpi->kf_boost < low) {
+ cpi->active_best_quality = kf_high_motion_minq[q];
+ } else {
+ const int gap = high - low;
+ const int offset = high - cpi->kf_boost;
+ const int qdiff = kf_high_motion_minq[q] - kf_low_motion_minq[q];
+ const int adjustment = ((offset * qdiff) + (gap >> 1)) / gap;
+
+ cpi->active_best_quality = kf_low_motion_minq[q] + adjustment;
+ }
+
+
+ // Allow somewhat lower kf minq with small image formats.
+ if ((cm->width * cm->height) <= (352 * 288)) {
+ q_adj_factor -= 0.25;
+ }
+
+ // Make a further adjustment based on the kf zero motion measure.
+ q_adj_factor += 0.05 - (0.001 * (double)cpi->kf_zeromotion_pct);
+
+ // Convert the adjustment factor to a qindex delta on active_best_quality.
+ q_val = vp9_convert_qindex_to_q(cpi->active_best_quality);
+ cpi->active_best_quality +=
+ compute_qdelta(cpi, q_val, (q_val * q_adj_factor));
}
-#if CONFIG_MULTIPLE_ARF
+#else
+ double current_q;
+
// Force the KF quantizer to be 30% of the active_worst_quality.
current_q = vp9_convert_qindex_to_q(cpi->active_worst_quality);
cpi->active_best_quality = cpi->active_worst_quality
diff --git a/vp9/encoder/vp9_onyx_int.h b/vp9/encoder/vp9_onyx_int.h
index 197ede20d..ba7505ee5 100644
--- a/vp9/encoder/vp9_onyx_int.h
+++ b/vp9/encoder/vp9_onyx_int.h
@@ -682,10 +682,6 @@ typedef struct VP9_COMP {
[VP9_SWITCHABLE_FILTERS];
unsigned int best_switchable_interp_count[VP9_SWITCHABLE_FILTERS];
-#if CONFIG_NEW_MVREF
- unsigned int mb_mv_ref_count[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
-#endif
-
int initial_width;
int initial_height;
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 900f889fc..cf37626f9 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -290,7 +290,6 @@ void vp9_initialize_rd_consts(VP9_COMP *cpi, int qindex) {
} else {
cpi->rd_threshes[i] = INT_MAX;
}
-
cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
}
} else {
@@ -302,7 +301,6 @@ void vp9_initialize_rd_consts(VP9_COMP *cpi, int qindex) {
} else {
cpi->rd_threshes[i] = INT_MAX;
}
-
cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
}
}
@@ -516,7 +514,7 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
#endif
for (; c < eob; c++) {
int v = qcoeff_ptr[scan[c]];
- int t = vp9_dct_value_tokens_ptr[v].Token;
+ int t = vp9_dct_value_tokens_ptr[v].token;
#if CONFIG_CODE_NONZEROCOUNT
nzc += (v != 0);
#endif
@@ -2476,7 +2474,7 @@ static void mv_pred(VP9_COMP *cpi, MACROBLOCK *x,
int row_offset, col_offset;
// Get the sad for each candidate reference mv
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < MAX_MV_REF_CANDIDATES; i++) {
this_mv.as_int = mbmi->ref_mvs[ref_frame][i].as_int;
// The list is at an end if we see 0 for a second time.
@@ -3481,6 +3479,7 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
yv12_mb[mbmi->ref_frame].y_height != cm->mb_rows * 16) &&
this_mode != ZEROMV)
continue;
+
if (mbmi->second_ref_frame > 0 &&
(yv12_mb[mbmi->second_ref_frame].y_width != cm->mb_cols * 16 ||
yv12_mb[mbmi->second_ref_frame].y_height != cm->mb_rows * 16) &&
@@ -4323,6 +4322,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
int64_t best_pred_diff[NB_PREDICTION_TYPES];
int64_t best_pred_rd[NB_PREDICTION_TYPES];
MB_MODE_INFO best_mbmode;
+ int j;
int mode_index, best_mode_index = 0;
unsigned int ref_costs[MAX_REF_FRAMES];
#if CONFIG_COMP_INTERINTRA_PRED
@@ -4340,6 +4340,8 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
int dist_uv[TX_SIZE_MAX_SB], skip_uv[TX_SIZE_MAX_SB];
MB_PREDICTION_MODE mode_uv[TX_SIZE_MAX_SB];
struct scale_factors scale_factor[4];
+ unsigned int ref_frame_mask = 0;
+ unsigned int mode_mask = 0;
xd->mode_info_context->mbmi.segment_id = segment_id;
estimate_ref_frame_costs(cpi, segment_id, ref_costs);
@@ -4350,23 +4352,55 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
for (i = 0; i < NB_TXFM_MODES; i++)
best_txfm_rd[i] = INT64_MAX;
+  // Create a mask set to 1 for each frame used by a smaller resolution.
+ if (cpi->Speed > 0) {
+ switch (block_size) {
+ case BLOCK_64X64:
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 4; j++) {
+ ref_frame_mask |= (1 << x->mb_context[i][j].mic.mbmi.ref_frame);
+ mode_mask |= (1 << x->mb_context[i][j].mic.mbmi.mode);
+ }
+ }
+ for (i = 0; i < 4; i++) {
+ ref_frame_mask |= (1 << x->sb32_context[i].mic.mbmi.ref_frame);
+ mode_mask |= (1 << x->sb32_context[i].mic.mbmi.mode);
+ }
+ break;
+ case BLOCK_32X32:
+ for (i = 0; i < 4; i++) {
+ ref_frame_mask |= (1
+ << x->mb_context[xd->sb_index][i].mic.mbmi.ref_frame);
+ mode_mask |= (1 << x->mb_context[xd->sb_index][i].mic.mbmi.mode);
+ }
+ break;
+ default:
+ // Until we handle all block sizes set it to present;
+ ref_frame_mask = 0xff;
+ mode_mask = 0xff;
+ break;
+ }
+ }
+
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
if (cpi->ref_frame_flags & flag_list[ref_frame]) {
setup_buffer_inter(cpi, x, idx_list[ref_frame], ref_frame, block_size,
- mb_row, mb_col, frame_mv[NEARESTMV],
- frame_mv[NEARMV], frame_mdcounts,
- yv12_mb, scale_factor);
+ mb_row, mb_col, frame_mv[NEARESTMV], frame_mv[NEARMV],
+ frame_mdcounts, yv12_mb, scale_factor);
}
frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
frame_mv[ZEROMV][ref_frame].as_int = 0;
}
-
- mbmi->mode = DC_PRED;
- for (i = 0; i <= ((bsize < BLOCK_SIZE_SB64X64) ? TX_16X16 : TX_32X32); i++) {
- mbmi->txfm_size = i;
- rd_pick_intra_sbuv_mode(cpi, x, &rate_uv_intra[i], &rate_uv_tokenonly[i],
- &dist_uv[i], &skip_uv[i], bsize);
- mode_uv[i] = mbmi->uv_mode;
+ if (cpi->Speed == 0
+ || (cpi->Speed > 0 && (ref_frame_mask & (1 << INTRA_FRAME)))) {
+ mbmi->mode = DC_PRED;
+ for (i = 0; i <= ((bsize < BLOCK_SIZE_SB64X64) ? TX_16X16 : TX_32X32);
+ i++) {
+ mbmi->txfm_size = i;
+ rd_pick_intra_sbuv_mode(cpi, x, &rate_uv_intra[i], &rate_uv_tokenonly[i],
+ &dist_uv[i], &skip_uv[i], bsize);
+ mode_uv[i] = mbmi->uv_mode;
+ }
}
for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
@@ -4392,10 +4426,21 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
x->skip = 0;
this_mode = vp9_mode_order[mode_index].mode;
ref_frame = vp9_mode_order[mode_index].ref_frame;
- if (!(ref_frame == INTRA_FRAME ||
- (cpi->ref_frame_flags & flag_list[ref_frame]))) {
+ if (!(ref_frame == INTRA_FRAME
+ || (cpi->ref_frame_flags & flag_list[ref_frame]))) {
continue;
}
+ if (cpi->Speed > 0) {
+ if (!(ref_frame_mask & (1 << ref_frame))) {
+ continue;
+ }
+ if (vp9_mode_order[mode_index].second_ref_frame != NONE
+ && !(ref_frame_mask
+ & (1 << vp9_mode_order[mode_index].second_ref_frame))) {
+ continue;
+ }
+ }
+
mbmi->ref_frame = ref_frame;
mbmi->second_ref_frame = vp9_mode_order[mode_index].second_ref_frame;
set_scale_factors(xd, mbmi->ref_frame, mbmi->second_ref_frame,
diff --git a/vp9/encoder/vp9_tokenize.c b/vp9/encoder/vp9_tokenize.c
index b68ef5d23..6b201488f 100644
--- a/vp9/encoder/vp9_tokenize.c
+++ b/vp9/encoder/vp9_tokenize.c
@@ -77,21 +77,21 @@ static void fill_value_tokens() {
while (++j < 11 && e[j].base_val <= a) {}
- t[i].Token = --j;
+ t[i].token = --j;
eb |= (a - e[j].base_val) << 1;
} else
- t[i].Token = a;
+ t[i].token = a;
- t[i].Extra = eb;
+ t[i].extra = eb;
}
// initialize the cost for extra bits for all possible coefficient value.
{
int cost = 0;
- vp9_extra_bit_struct *p = vp9_extra_bits + t[i].Token;
+ vp9_extra_bit_struct *p = vp9_extra_bits + t[i].token;
if (p->base_val) {
- const int extra = t[i].Extra;
+ const int extra = t[i].extra;
const int Length = p->Len;
if (Length)
@@ -307,8 +307,8 @@ static void tokenize_b(VP9_COMP *cpi,
v = qcoeff_ptr[rc];
assert(-DCT_MAX_VALUE <= v && v < DCT_MAX_VALUE);
- t->Extra = vp9_dct_value_tokens_ptr[v].Extra;
- token = vp9_dct_value_tokens_ptr[v].Token;
+ t->extra = vp9_dct_value_tokens_ptr[v].extra;
+ token = vp9_dct_value_tokens_ptr[v].token;
} else {
#if CONFIG_CODE_NONZEROCOUNT
if (nzc_used)
@@ -318,7 +318,7 @@ static void tokenize_b(VP9_COMP *cpi,
token = DCT_EOB_TOKEN;
}
- t->Token = token;
+ t->token = token;
t->context_tree = probs[type][ref][band][pt];
#if CONFIG_CODE_NONZEROCOUNT
// Skip zero node if there are no zeros left
@@ -327,7 +327,7 @@ static void tokenize_b(VP9_COMP *cpi,
else
#endif
t->skip_eob_node = (c > 0) && (token_cache[c - 1] == 0);
- assert(vp9_coef_encodings[t->Token].len - t->skip_eob_node > 0);
+ assert(vp9_coef_encodings[t->token].len - t->skip_eob_node > 0);
if (!dry_run) {
++counts[type][ref][band][pt][token];
if (!t->skip_eob_node)
@@ -809,7 +809,7 @@ static void stuff_b(VP9_COMP *cpi,
#endif
pt = combine_entropy_contexts(a_ec, l_ec);
band = 0;
- t->Token = DCT_EOB_TOKEN;
+ t->token = DCT_EOB_TOKEN;
t->context_tree = probs[type][ref][band][pt];
t->skip_eob_node = 0;
++t;
diff --git a/vp9/encoder/vp9_tokenize.h b/vp9/encoder/vp9_tokenize.h
index 2dcbd3002..82d798e47 100644
--- a/vp9/encoder/vp9_tokenize.h
+++ b/vp9/encoder/vp9_tokenize.h
@@ -17,14 +17,14 @@
void vp9_tokenize_initialize();
typedef struct {
- int16_t Token;
- int16_t Extra;
+ int16_t token;
+ int16_t extra;
} TOKENVALUE;
typedef struct {
const vp9_prob *context_tree;
- int16_t Extra;
- uint8_t Token;
+ int16_t extra;
+ uint8_t token;
uint8_t skip_eob_node;
} TOKENEXTRA;