Diffstat (limited to 'vp9')
-rw-r--r--  vp9/encoder/vp9_encoder.c    18
-rw-r--r--  vp9/encoder/vp9_mcomp.c      41
-rw-r--r--  vp9/encoder/vp9_mcomp.h       6
-rw-r--r--  vp9/encoder/vp9_ratectrl.c  284
-rw-r--r--  vp9/encoder/vp9_ratectrl.h    4
-rw-r--r--  vp9/encoder/vp9_rd.c         19
-rw-r--r--  vp9/encoder/vp9_rdopt.c      34
7 files changed, 233 insertions, 173 deletions
diff --git a/vp9/encoder/vp9_encoder.c b/vp9/encoder/vp9_encoder.c
index b6048b9b7..a73185623 100644
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -3836,9 +3836,10 @@ static int encode_without_recode_loop(VP9_COMP *cpi, size_t *size,
if (svc->spatial_layer_id == svc->first_spatial_layer_to_encode) {
svc->high_source_sad_superframe = cpi->rc.high_source_sad;
// On scene change reset temporal layer pattern to TL0.
- // TODO(marpan/jianj): Fix this to handle case where base
- // spatial layers are skipped, in which case we should insert
- // and reset to spatial layer 0 on scene change.
+ // Note that if the base/lower spatial layers are skipped, then instead of
+ // inserting the base layer here, we force max-q for the next superframe
+ // with lower spatial layers; this is done in vp9_encodedframe_overshoot()
+ // when max-q is decided for the current layer.
if (svc->high_source_sad_superframe && svc->temporal_layer_id > 0) {
// rc->high_source_sad will get reset so copy it to restore it.
int tmp_high_source_sad = cpi->rc.high_source_sad;
@@ -4851,8 +4852,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, size_t *size,
vp9_pack_bitstream(cpi, dest, size);
if (cpi->rc.use_post_encode_drop && cm->base_qindex < cpi->rc.worst_quality &&
- cpi->svc.spatial_layer_id == 0 &&
- post_encode_drop_screen_content(cpi, size)) {
+ cpi->svc.spatial_layer_id == 0 && post_encode_drop_cbr(cpi, size)) {
restore_coding_context(cpi);
return;
}
@@ -5664,10 +5664,10 @@ uint32_t motion_compensated_prediction(VP9_COMP *cpi, ThreadData *td,
(void)sadpb;
prepare_nb_full_mvs(&cpi->tpl_stats[frame_idx], mi_row, mi_col, rf_idx, bsize,
nb_full_mvs);
- vp9_full_pixel_diamond_new(cpi, x, &best_ref_mv1_full, step_param, lambda,
- MAX_MVSEARCH_STEPS - 1 - step_param, 1,
- &cpi->fn_ptr[bsize], nb_full_mvs, tpl_stats,
- rf_idx);
+ vp9_full_pixel_diamond_new(
+ cpi, x, &best_ref_mv1_full, step_param, lambda, 1, &cpi->fn_ptr[bsize],
+ nb_full_mvs, &tpl_stats->mv_arr[rf_idx].as_mv,
+ &tpl_stats->mv_dist[rf_idx], &tpl_stats->mv_cost[rf_idx]);
#else
(void)frame_idx;
(void)mi_row;
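
A minimal caller-side sketch of the new vp9_full_pixel_diamond_new() convention used above in motion_compensated_prediction(): results now come back through plain output pointers rather than being written into TplDepStats inside the search, and further_steps is derived in the callee from step_param. Identifiers such as best_ref_mv1_full, lambda, tpl_stats and rf_idx are taken from the hunk context, not reproduced in full here.

/* sketch: the caller chooses where the best full-pel MV and its scores land */
MV *best_mv = &tpl_stats->mv_arr[rf_idx].as_mv;
double *best_mv_dist = &tpl_stats->mv_dist[rf_idx];
double *best_mv_cost = &tpl_stats->mv_cost[rf_idx];
vp9_full_pixel_diamond_new(cpi, x, &best_ref_mv1_full, step_param, lambda,
                           /*do_refine=*/1, &cpi->fn_ptr[bsize], nb_full_mvs,
                           best_mv, best_mv_dist, best_mv_cost);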
diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c
index a2543035c..235f0345e 100644
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -2240,24 +2240,18 @@ unsigned int vp9_int_pro_motion_estimation(const VP9_COMP *cpi, MACROBLOCK *x,
refining search */
double vp9_full_pixel_diamond_new(const VP9_COMP *cpi, MACROBLOCK *x,
MV *mvp_full, int step_param, double lambda,
- int further_steps, int do_refine,
+ int do_refine,
const vp9_variance_fn_ptr_t *fn_ptr,
- const int_mv *nb_full_mvs,
- TplDepStats *tpl_stats, int rf_idx) {
- MV *dst_mv = &tpl_stats->mv_arr[rf_idx].as_mv;
- MV temp_mv;
+ const int_mv *nb_full_mvs, MV *best_mv,
+ double *best_mv_dist, double *best_mv_cost) {
int n, num00 = 0;
double thissme;
- double mv_dist;
- double mv_cost;
double bestsme;
+ const int further_steps = MAX_MVSEARCH_STEPS - 1 - step_param;
vpx_clear_system_state();
- bestsme = vp9_diamond_search_sad_new(x, &cpi->ss_cfg, mvp_full, &temp_mv,
- &mv_dist, &mv_cost, step_param, lambda,
- &n, fn_ptr, nb_full_mvs);
- *dst_mv = temp_mv;
- tpl_stats->mv_dist[rf_idx] = mv_dist;
- tpl_stats->mv_cost[rf_idx] = mv_cost;
+ bestsme = vp9_diamond_search_sad_new(x, &cpi->ss_cfg, mvp_full, best_mv,
+ best_mv_dist, best_mv_cost, step_param,
+ lambda, &n, fn_ptr, nb_full_mvs);
// If there won't be more n-step search, check to see if refining search is
// needed.
@@ -2268,6 +2262,9 @@ double vp9_full_pixel_diamond_new(const VP9_COMP *cpi, MACROBLOCK *x,
if (num00) {
num00--;
} else {
+ MV temp_mv;
+ double mv_dist;
+ double mv_cost;
thissme = vp9_diamond_search_sad_new(x, &cpi->ss_cfg, mvp_full, &temp_mv,
&mv_dist, &mv_cost, step_param + n,
lambda, &num00, fn_ptr, nb_full_mvs);
@@ -2276,9 +2273,9 @@ double vp9_full_pixel_diamond_new(const VP9_COMP *cpi, MACROBLOCK *x,
if (thissme < bestsme) {
bestsme = thissme;
- *dst_mv = temp_mv;
- tpl_stats->mv_dist[rf_idx] = mv_dist;
- tpl_stats->mv_cost[rf_idx] = mv_cost;
+ *best_mv = temp_mv;
+ *best_mv_dist = mv_dist;
+ *best_mv_cost = mv_cost;
}
}
}
@@ -2286,15 +2283,17 @@ double vp9_full_pixel_diamond_new(const VP9_COMP *cpi, MACROBLOCK *x,
// final 1-away diamond refining search
if (do_refine) {
const int search_range = 8;
- MV best_mv = *dst_mv;
+ MV temp_mv = *best_mv;
+ double mv_dist;
+ double mv_cost;
thissme =
- vp9_refining_search_sad_new(x, &best_mv, &mv_dist, &mv_cost, lambda,
+ vp9_refining_search_sad_new(x, &temp_mv, &mv_dist, &mv_cost, lambda,
search_range, fn_ptr, nb_full_mvs);
if (thissme < bestsme) {
bestsme = thissme;
- *dst_mv = best_mv;
- tpl_stats->mv_dist[rf_idx] = mv_dist;
- tpl_stats->mv_cost[rf_idx] = mv_cost;
+ *best_mv = temp_mv;
+ *best_mv_dist = mv_dist;
+ *best_mv_cost = mv_cost;
}
}
return bestsme;
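
Note on the dropped further_steps parameter: the callee now derives it from step_param itself. A small illustration, assuming MAX_MVSEARCH_STEPS is 11 as defined in vp9_mcomp.h:

/* illustration only; MAX_MVSEARCH_STEPS == 11 is assumed from vp9_mcomp.h */
int step_param = 3;
int further_steps = MAX_MVSEARCH_STEPS - 1 - step_param;  /* 11 - 1 - 3 = 7 n-step passes */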
diff --git a/vp9/encoder/vp9_mcomp.h b/vp9/encoder/vp9_mcomp.h
index a159cb288..54f68ca74 100644
--- a/vp9/encoder/vp9_mcomp.h
+++ b/vp9/encoder/vp9_mcomp.h
@@ -130,10 +130,10 @@ double vp9_refining_search_sad_new(const MACROBLOCK *x, MV *best_full_mv,
double vp9_full_pixel_diamond_new(const struct VP9_COMP *cpi, MACROBLOCK *x,
MV *mvp_full, int step_param, double lambda,
- int further_steps, int do_refine,
+ int do_refine,
const vp9_variance_fn_ptr_t *fn_ptr,
- const int_mv *nb_full_mvs,
- struct TplDepStats *tpl_stats, int rf_idx);
+ const int_mv *nb_full_mvs, MV *best_mv,
+ double *best_mv_dist, double *best_mv_cost);
double av1_nb_mvs_inconsistency(const MV *mv, const int_mv *nb_mvs);
#endif // CONFIG_NON_GREEDY_MV
diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c
index cdd824358..b5c002aea 100644
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -550,7 +550,7 @@ static int drop_frame(VP9_COMP *cpi) {
}
}
-int post_encode_drop_screen_content(VP9_COMP *cpi, size_t *size) {
+int post_encode_drop_cbr(VP9_COMP *cpi, size_t *size) {
size_t frame_size = *size << 3;
int64_t new_buffer_level =
cpi->rc.buffer_level + cpi->rc.avg_frame_bandwidth - (int64_t)frame_size;
@@ -570,10 +570,25 @@ int post_encode_drop_screen_content(VP9_COMP *cpi, size_t *size) {
cpi->last_frame_dropped = 1;
cpi->ext_refresh_frame_flags_pending = 0;
if (cpi->use_svc) {
- cpi->svc.last_layer_dropped[cpi->svc.spatial_layer_id] = 1;
- cpi->svc.drop_spatial_layer[cpi->svc.spatial_layer_id] = 1;
- cpi->svc.drop_count[cpi->svc.spatial_layer_id]++;
- cpi->svc.skip_enhancement_layer = 1;
+ SVC *svc = &cpi->svc;
+ int sl = 0;
+ int tl = 0;
+ svc->last_layer_dropped[svc->spatial_layer_id] = 1;
+ svc->drop_spatial_layer[svc->spatial_layer_id] = 1;
+ svc->drop_count[svc->spatial_layer_id]++;
+ svc->skip_enhancement_layer = 1;
+ // Post-encode drop is only checked on the base spatial layer;
+ // for now, if max-q is set on the base we force it on all layers.
+ for (sl = 0; sl < svc->number_spatial_layers; ++sl) {
+ for (tl = 0; tl < svc->number_temporal_layers; ++tl) {
+ const int layer =
+ LAYER_IDS_TO_IDX(sl, tl, svc->number_temporal_layers);
+ LAYER_CONTEXT *lc = &svc->layer_context[layer];
+ RATE_CONTROL *lrc = &lc->rc;
+ lrc->force_max_q = 1;
+ lrc->avg_frame_qindex[INTER_FRAME] = cpi->rc.worst_quality;
+ }
+ }
}
return 1;
}
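
A sketch of the buffer accounting post_encode_drop_cbr() performs before deciding to drop (the exact threshold it compares against lies outside this hunk): *size is the encoded frame size in bytes, so << 3 converts it to bits before it is charged against the buffer.

/* sketch, following the function body above; the drop threshold is elided */
size_t frame_size = *size << 3;  /* bytes -> bits */
int64_t new_buffer_level =
    cpi->rc.buffer_level + cpi->rc.avg_frame_bandwidth - (int64_t)frame_size;
/* if new_buffer_level falls too low, the frame is dropped, the caller restores
 * the coding context, and for SVC force_max_q is set on every layer as above. */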
@@ -1271,6 +1286,112 @@ int vp9_frame_type_qdelta(const VP9_COMP *cpi, int rf_level, int q) {
}
#define STATIC_MOTION_THRESH 95
+
+static void pick_kf_q_bound_two_pass(const VP9_COMP *cpi, int *bottom_index,
+ int *top_index) {
+ const VP9_COMMON *const cm = &cpi->common;
+ const RATE_CONTROL *const rc = &cpi->rc;
+ int active_best_quality;
+ int active_worst_quality = cpi->twopass.active_worst_quality;
+
+ if (rc->this_key_frame_forced) {
+ // Handle the special case for key frames forced when we have reached
+ // the maximum key frame interval. Here force the Q to a range
+ // based on the ambient Q to reduce the risk of popping.
+ double last_boosted_q;
+ int delta_qindex;
+ int qindex;
+
+ if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) {
+ qindex = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
+ active_best_quality = qindex;
+ last_boosted_q = vp9_convert_qindex_to_q(qindex, cm->bit_depth);
+ delta_qindex = vp9_compute_qdelta(rc, last_boosted_q,
+ last_boosted_q * 1.25, cm->bit_depth);
+ active_worst_quality =
+ VPXMIN(qindex + delta_qindex, active_worst_quality);
+ } else {
+ qindex = rc->last_boosted_qindex;
+ last_boosted_q = vp9_convert_qindex_to_q(qindex, cm->bit_depth);
+ delta_qindex = vp9_compute_qdelta(rc, last_boosted_q,
+ last_boosted_q * 0.75, cm->bit_depth);
+ active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+ }
+ } else {
+ // Not forced keyframe.
+ double q_adj_factor = 1.0;
+ double q_val;
+ // Baseline value derived from cpi->active_worst_quality and kf boost.
+ active_best_quality =
+ get_kf_active_quality(rc, active_worst_quality, cm->bit_depth);
+ if (cpi->twopass.kf_zeromotion_pct >= STATIC_KF_GROUP_THRESH) {
+ active_best_quality /= 4;
+ }
+
+ // Don't allow the active min to be lossless (q0) unless the max q
+ // already indicates lossless.
+ active_best_quality =
+ VPXMIN(active_worst_quality, VPXMAX(1, active_best_quality));
+
+ // Allow somewhat lower kf minq with small image formats.
+ if ((cm->width * cm->height) <= (352 * 288)) {
+ q_adj_factor -= 0.25;
+ }
+
+ // Make a further adjustment based on the kf zero motion measure.
+ q_adj_factor += 0.05 - (0.001 * (double)cpi->twopass.kf_zeromotion_pct);
+
+ // Convert the adjustment factor to a qindex delta
+ // on active_best_quality.
+ q_val = vp9_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+ active_best_quality +=
+ vp9_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+ }
+ *top_index = active_worst_quality;
+ *bottom_index = active_best_quality;
+}
+
+static int rc_constant_q(const VP9_COMP *cpi, int *bottom_index, int *top_index,
+ int gf_group_index) {
+ const VP9_COMMON *const cm = &cpi->common;
+ const RATE_CONTROL *const rc = &cpi->rc;
+ const VP9EncoderConfig *const oxcf = &cpi->oxcf;
+ const GF_GROUP *gf_group = &cpi->twopass.gf_group;
+ const int is_intra_frame = frame_is_intra_only(cm);
+
+ const int cq_level = get_active_cq_level_two_pass(&cpi->twopass, rc, oxcf);
+
+ int q = cq_level;
+ int active_best_quality = cq_level;
+ int active_worst_quality = cq_level;
+
+ // Key frame qp decision
+ if (is_intra_frame && rc->frames_to_key > 1)
+ pick_kf_q_bound_two_pass(cpi, &active_best_quality, &active_worst_quality);
+
+ // ARF / GF qp decision
+ if (!is_intra_frame && !rc->is_src_frame_alt_ref &&
+ cpi->refresh_alt_ref_frame) {
+ active_best_quality = get_gf_active_quality(cpi, q, cm->bit_depth);
+
+ // Modify best quality for second level arfs. For mode VPX_Q this
+ // becomes the baseline frame q.
+ if (gf_group->rf_level[gf_group_index] == GF_ARF_LOW) {
+ const int layer_depth = gf_group->layer_depth[gf_group_index];
+ // linearly fit the frame q depending on the layer depth index from
+ // the base layer ARF.
+ active_best_quality = ((layer_depth - 1) * cq_level +
+ active_best_quality + layer_depth / 2) /
+ layer_depth;
+ }
+ }
+
+ q = active_best_quality;
+ *top_index = active_worst_quality;
+ *bottom_index = active_best_quality;
+ return q;
+}
+
static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, int *bottom_index,
int *top_index, int gf_group_index) {
const VP9_COMMON *const cm = &cpi->common;
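
A worked example (integer arithmetic, illustrative numbers) of the second-level ARF interpolation in rc_constant_q() above: the frame q is fitted between the base-layer ARF q and the constant cq_level according to layer depth.

/* illustrative values only */
int layer_depth = 2, cq_level = 60;
int arf_q = 40;  /* stands in for get_gf_active_quality() */
int q = ((layer_depth - 1) * cq_level + arf_q + layer_depth / 2) / layer_depth;
/* = (60 + 40 + 1) / 2 = 50; deeper layers weight cq_level more heavily,
 * e.g. layer_depth = 4 gives (180 + 40 + 2) / 4 = 55 */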
@@ -1284,66 +1405,11 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, int *bottom_index,
int *inter_minq;
ASSIGN_MINQ_TABLE(cm->bit_depth, inter_minq);
- if (frame_is_intra_only(cm)) {
- if (rc->frames_to_key == 1 && oxcf->rc_mode == VPX_Q) {
- // If the next frame is also a key frame or the current frame is the
- // only frame in the sequence in AOM_Q mode, just use the cq_level
- // as q.
- active_best_quality = cq_level;
- active_worst_quality = cq_level;
- } else if (rc->this_key_frame_forced) {
- // Handle the special case for key frames forced when we have reached
- // the maximum key frame interval. Here force the Q to a range
- // based on the ambient Q to reduce the risk of popping.
- double last_boosted_q;
- int delta_qindex;
- int qindex;
-
- if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) {
- qindex = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
- active_best_quality = qindex;
- last_boosted_q = vp9_convert_qindex_to_q(qindex, cm->bit_depth);
- delta_qindex = vp9_compute_qdelta(rc, last_boosted_q,
- last_boosted_q * 1.25, cm->bit_depth);
- active_worst_quality =
- VPXMIN(qindex + delta_qindex, active_worst_quality);
- } else {
- qindex = rc->last_boosted_qindex;
- last_boosted_q = vp9_convert_qindex_to_q(qindex, cm->bit_depth);
- delta_qindex = vp9_compute_qdelta(rc, last_boosted_q,
- last_boosted_q * 0.75, cm->bit_depth);
- active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
- }
- } else {
- // Not forced keyframe.
- double q_adj_factor = 1.0;
- double q_val;
- // Baseline value derived from cpi->active_worst_quality and kf boost.
- active_best_quality =
- get_kf_active_quality(rc, active_worst_quality, cm->bit_depth);
- if (cpi->twopass.kf_zeromotion_pct >= STATIC_KF_GROUP_THRESH) {
- active_best_quality /= 4;
- }
-
- // Dont allow the active min to be lossless (q0) unlesss the max q
- // already indicates lossless.
- active_best_quality =
- VPXMIN(active_worst_quality, VPXMAX(1, active_best_quality));
+ if (oxcf->rc_mode == VPX_Q)
+ return rc_constant_q(cpi, bottom_index, top_index, gf_group_index);
- // Allow somewhat lower kf minq with small image formats.
- if ((cm->width * cm->height) <= (352 * 288)) {
- q_adj_factor -= 0.25;
- }
-
- // Make a further adjustment based on the kf zero motion measure.
- q_adj_factor += 0.05 - (0.001 * (double)cpi->twopass.kf_zeromotion_pct);
-
- // Convert the adjustment factor to a qindex delta
- // on active_best_quality.
- q_val = vp9_convert_qindex_to_q(active_best_quality, cm->bit_depth);
- active_best_quality +=
- vp9_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
- }
+ if (frame_is_intra_only(cm)) {
+ pick_kf_q_bound_two_pass(cpi, &active_best_quality, &active_worst_quality);
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
// Use the lower of active_worst_quality and recent
@@ -1374,54 +1440,31 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, int *bottom_index,
((layer_depth - 1) * q + active_best_quality + layer_depth / 2) /
layer_depth;
}
- } else if (oxcf->rc_mode == VPX_Q) {
- if (!cpi->refresh_alt_ref_frame) {
- active_best_quality = cq_level;
- } else {
- active_best_quality = get_gf_active_quality(cpi, q, cm->bit_depth);
-
- // Modify best quality for second level arfs. For mode VPX_Q this
- // becomes the baseline frame q.
- if (gf_group->rf_level[gf_group_index] == GF_ARF_LOW) {
- const int layer_depth = gf_group->layer_depth[gf_group_index];
- // linearly fit the frame q depending on the layer depth index from
- // the base layer ARF.
- active_best_quality = ((layer_depth - 1) * cq_level +
- active_best_quality + layer_depth / 2) /
- layer_depth;
- }
- }
} else {
active_best_quality = get_gf_active_quality(cpi, q, cm->bit_depth);
}
} else {
- if (oxcf->rc_mode == VPX_Q) {
- active_best_quality = cq_level;
- } else {
- active_best_quality = inter_minq[active_worst_quality];
+ active_best_quality = inter_minq[active_worst_quality];
- // For the constrained quality mode we don't want
- // q to fall below the cq level.
- if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) {
- active_best_quality = cq_level;
- }
+ // For the constrained quality mode we don't want
+ // q to fall below the cq level.
+ if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) {
+ active_best_quality = cq_level;
}
}
// Extension to max or min Q if undershoot or overshoot is outside
// the permitted range.
- if (cpi->oxcf.rc_mode != VPX_Q) {
- if (frame_is_intra_only(cm) ||
- (!rc->is_src_frame_alt_ref &&
- (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))) {
- active_best_quality -=
- (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast);
- active_worst_quality += (cpi->twopass.extend_maxq / 2);
- } else {
- active_best_quality -=
- (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast) / 2;
- active_worst_quality += cpi->twopass.extend_maxq;
- }
+ if (frame_is_intra_only(cm) ||
+ (!rc->is_src_frame_alt_ref &&
+ (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))) {
+ active_best_quality -=
+ (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast);
+ active_worst_quality += (cpi->twopass.extend_maxq / 2);
+ } else {
+ active_best_quality -=
+ (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast) / 2;
+ active_worst_quality += cpi->twopass.extend_maxq;
}
// For normal frames do not allow an active minq lower than the q used for
@@ -1457,10 +1500,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, int *bottom_index,
active_worst_quality =
clamp(active_worst_quality, active_best_quality, rc->worst_quality);
- if (oxcf->rc_mode == VPX_Q) {
- q = active_best_quality;
- // Special case code to try and match quality with forced key frames.
- } else if (frame_is_intra_only(cm) && rc->this_key_frame_forced) {
+ if (frame_is_intra_only(cm) && rc->this_key_frame_forced) {
// If static since last kf use better of last boosted and last kf q.
if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) {
q = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
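
Condensed control flow of rc_pick_q_and_bounds_two_pass() after this refactor (a sketch assembled from the hunks above, not a verbatim excerpt): constant-Q mode now exits early into rc_constant_q(), which is why the VPX_Q special cases further down could be removed and the min/max extension now applies unconditionally.

/* sketch of the new dispatch */
if (oxcf->rc_mode == VPX_Q)
  return rc_constant_q(cpi, bottom_index, top_index, gf_group_index);
if (frame_is_intra_only(cm))
  pick_kf_q_bound_two_pass(cpi, &active_best_quality, &active_worst_quality);
/* ... golden/alt-ref and inter paths follow, then the undershoot/overshoot
 * extension of active_best/active_worst, now with no VPX_Q guard ... */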
@@ -3021,21 +3061,27 @@ int vp9_encodedframe_overshoot(VP9_COMP *cpi, int frame_size, int *q) {
cpi->rc.rate_correction_factors[INTER_NORMAL] = rate_correction_factor;
}
// For temporal layers, reset the rate control parameters across all
- // temporal layers.
+ // temporal layers. If the first_spatial_layer_to_encode > 0, then this
+ // superframe has skipped lower base layers. So in this case we should also
+ // reset and force max-q for spatial layers < first_spatial_layer_to_encode.
if (cpi->use_svc) {
- int i = 0;
+ int tl = 0;
+ int sl = 0;
SVC *svc = &cpi->svc;
- for (i = 0; i < svc->number_temporal_layers; ++i) {
- const int layer = LAYER_IDS_TO_IDX(svc->spatial_layer_id, i,
- svc->number_temporal_layers);
- LAYER_CONTEXT *lc = &svc->layer_context[layer];
- RATE_CONTROL *lrc = &lc->rc;
- lrc->avg_frame_qindex[INTER_FRAME] = *q;
- lrc->buffer_level = lrc->optimal_buffer_level;
- lrc->bits_off_target = lrc->optimal_buffer_level;
- lrc->rc_1_frame = 0;
- lrc->rc_2_frame = 0;
- lrc->rate_correction_factors[INTER_NORMAL] = rate_correction_factor;
+ for (sl = 0; sl < svc->first_spatial_layer_to_encode; ++sl) {
+ for (tl = 0; tl < svc->number_temporal_layers; ++tl) {
+ const int layer =
+ LAYER_IDS_TO_IDX(sl, tl, svc->number_temporal_layers);
+ LAYER_CONTEXT *lc = &svc->layer_context[layer];
+ RATE_CONTROL *lrc = &lc->rc;
+ lrc->avg_frame_qindex[INTER_FRAME] = *q;
+ lrc->buffer_level = lrc->optimal_buffer_level;
+ lrc->bits_off_target = lrc->optimal_buffer_level;
+ lrc->rc_1_frame = 0;
+ lrc->rc_2_frame = 0;
+ lrc->rate_correction_factors[INTER_NORMAL] = rate_correction_factor;
+ lrc->force_max_q = 1;
+ }
}
}
return 1;
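
An illustration of the layer indexing in the loop above, under the assumption that LAYER_IDS_TO_IDX(sl, tl, num_tl) expands to sl * num_tl + tl (the usual vp9 SVC layout): with number_temporal_layers == 3 and first_spatial_layer_to_encode == 1, the loop visits layer contexts 0, 1 and 2, i.e. every temporal layer of the skipped base spatial layer, resets their rate control state and forces max-q so the next superframe re-encodes them at worst_quality.

/* assumed layout: LAYER_IDS_TO_IDX(sl, tl, num_tl) == sl * num_tl + tl */
const int num_tl = 3;               /* number_temporal_layers          */
const int layer = 0 * num_tl + 2;   /* sl = 0, tl = 2  ->  context 2   */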
diff --git a/vp9/encoder/vp9_ratectrl.h b/vp9/encoder/vp9_ratectrl.h
index 16aa08137..a343bd34b 100644
--- a/vp9/encoder/vp9_ratectrl.h
+++ b/vp9/encoder/vp9_ratectrl.h
@@ -256,8 +256,8 @@ void vp9_rc_postencode_update_drop_frame(struct VP9_COMP *cpi);
// Changes only the rate correction factors in the rate control structure.
void vp9_rc_update_rate_correction_factors(struct VP9_COMP *cpi);
-// Post encode drop for CBR screen-content mode.
-int post_encode_drop_screen_content(struct VP9_COMP *cpi, size_t *size);
+// Post encode drop for CBR mode.
+int post_encode_drop_cbr(struct VP9_COMP *cpi, size_t *size);
// Decide if we should drop this frame: For 1-pass CBR.
// Changes only the decimation count in the rate control structure
diff --git a/vp9/encoder/vp9_rd.c b/vp9/encoder/vp9_rd.c
index 8323f3af4..18b74f57b 100644
--- a/vp9/encoder/vp9_rd.c
+++ b/vp9/encoder/vp9_rd.c
@@ -174,23 +174,10 @@ static const int rd_frame_type_factor[FRAME_UPDATE_TYPES] = { 128, 144, 128,
128, 144, 144 };
int vp9_compute_rd_mult_based_on_qindex(const VP9_COMP *cpi, int qindex) {
- // largest dc_quant is 21387, therefore rdmult should always fit in uint32_t
- // i.e. 21387 * 21387 * 8 = 3659230152 = 0xDA1B6BC8
+ // largest dc_quant is 21387, therefore rdmult should always fit in int32_t
const int q = vp9_dc_quant(qindex, 0, cpi->common.bit_depth);
- uint32_t rdmult = q * q;
-
- if (cpi->common.frame_type != KEY_FRAME) {
- rdmult = rdmult * 3 + (rdmult * 2 / 3);
- } else {
- if (qindex < 64)
- rdmult = rdmult * 4;
- else if (qindex <= 128)
- rdmult = rdmult * 3 + rdmult / 2;
- else if (qindex < 190)
- rdmult = rdmult * 4 + rdmult / 2;
- else
- rdmult = rdmult * 7 + rdmult / 2;
- }
+ int rdmult = q * q;
+ rdmult = rdmult * 3 + (rdmult * 2 / 3);
#if CONFIG_VP9_HIGHBITDEPTH
switch (cpi->common.bit_depth) {
case VPX_BITS_10: rdmult = ROUND_POWER_OF_TWO(rdmult, 4); break;
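
A quick check of the int32_t claim in the new comment: with the largest dc_quant of 21387 and the single remaining scaling of rdmult * 3 + rdmult * 2 / 3, the result stays below INT32_MAX; the removed key-frame branch could scale by up to 7.5x, which is why the old code needed uint32_t.

/* arithmetic check, done in 64 bits to avoid overflow during the check itself */
int64_t q = 21387;
int64_t rdmult = q * q;                        /* 457,403,769                  */
int64_t worst = rdmult * 3 + rdmult * 2 / 3;   /* 1,677,147,153 < INT32_MAX    */
int64_t old_worst = rdmult * 7 + rdmult / 2;   /* 3,430,528,267 > INT32_MAX    */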
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 9cde479cd..2e1aa1d30 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -2322,9 +2322,7 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
const VP9_COMMON *cm = &cpi->common;
MODE_INFO *mi = xd->mi[0];
struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
- int bestsme = INT_MAX;
int step_param;
- int sadpb = x->sadperbit16;
MV mvp_full;
int ref = mi->ref_frame[0];
MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
@@ -2335,8 +2333,21 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
vp9_get_scaled_ref_frame(cpi, ref);
const int pw = num_4x4_blocks_wide_lookup[bsize] << 2;
const int ph = num_4x4_blocks_high_lookup[bsize] << 2;
-
MV pred_mv[3];
+
+#if CONFIG_NON_GREEDY_MV
+ double mv_dist = 0;
+ double mv_cost = 0;
+ double lambda = 0;
+ double bestsme;
+ int_mv nb_full_mvs[NB_MVS_NUM];
+ // TODO(angiebird): Set nb_full_mvs properly.
+ vp9_zero(nb_full_mvs);
+#else // CONFIG_NON_GREEDY_MV
+ int bestsme = INT_MAX;
+ int sadpb = x->sadperbit16;
+#endif // CONFIG_NON_GREEDY_MV
+
pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
pred_mv[1] = x->mbmi_ext->ref_mvs[ref][1].as_mv;
pred_mv[2] = x->pred_mv[ref];
@@ -2406,14 +2417,24 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
mvp_full.col >>= 3;
mvp_full.row >>= 3;
+#if CONFIG_NON_GREEDY_MV
+ bestsme = vp9_full_pixel_diamond_new(cpi, x, &mvp_full, step_param, lambda, 1,
+ &cpi->fn_ptr[bsize], nb_full_mvs,
+ &tmp_mv->as_mv, &mv_dist, &mv_cost);
+#else // CONFIG_NON_GREEDY_MV
bestsme = vp9_full_pixel_search(
cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method, sadpb,
cond_cost_list(cpi, cost_list), &ref_mv, &tmp_mv->as_mv, INT_MAX, 1);
+#endif // CONFIG_NON_GREEDY_MV
if (cpi->sf.enhanced_full_pixel_motion_search) {
int i;
for (i = 0; i < 3; ++i) {
+#if CONFIG_NON_GREEDY_MV
+ double this_me;
+#else // CONFIG_NON_GREEDY_MV
int this_me;
+#endif // CONFIG_NON_GREEDY_MV
MV this_mv;
int diff_row;
int diff_col;
@@ -2437,11 +2458,18 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
mvp_full = pred_mv[i];
mvp_full.col >>= 3;
mvp_full.row >>= 3;
+#if CONFIG_NON_GREEDY_MV
+ this_me = vp9_full_pixel_diamond_new(
+ cpi, x, &mvp_full, VPXMAX(step_param, MAX_MVSEARCH_STEPS - step),
+ lambda, 1, &cpi->fn_ptr[bsize], nb_full_mvs, &tmp_mv->as_mv, &mv_dist,
+ &mv_cost);
+#else // CONFIG_NON_GREEDY_MV
this_me = vp9_full_pixel_search(
cpi, x, bsize, &mvp_full,
VPXMAX(step_param, MAX_MVSEARCH_STEPS - step),
cpi->sf.mv.search_method, sadpb, cond_cost_list(cpi, cost_list),
&ref_mv, &this_mv, INT_MAX, 1);
+#endif // CONFIG_NON_GREEDY_MV
if (this_me < bestsme) {
tmp_mv->as_mv = this_mv;
bestsme = this_me;
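
Condensed view of the two full-pel search paths single_motion_search() now selects between (a sketch of the hunks above, not a verbatim excerpt; lambda and nb_full_mvs are the placeholder values set up earlier in this diff, and the score type changes from int to double under the flag):

#if CONFIG_NON_GREEDY_MV
  /* neighbor-consistency (lambda-weighted) full-pel search; score is a double */
  bestsme = vp9_full_pixel_diamond_new(cpi, x, &mvp_full, step_param, lambda, 1,
                                       &cpi->fn_ptr[bsize], nb_full_mvs,
                                       &tmp_mv->as_mv, &mv_dist, &mv_cost);
#else
  /* classic SAD-based full-pel search; score is an int and uses sadperbit16 */
  bestsme = vp9_full_pixel_search(
      cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method, sadpb,
      cond_cost_list(cpi, cost_list), &ref_mv, &tmp_mv->as_mv, INT_MAX, 1);
#endif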