summary | refs | log | tree | commit | diff
path: root/vp9/encoder
diff options
context:
space:
mode:
Diffstat (limited to 'vp9/encoder')
-rw-r--r--  vp9/encoder/vp9_encodeframe.c       | 22
-rw-r--r--  vp9/encoder/vp9_encodemb.c          |  2
-rw-r--r--  vp9/encoder/vp9_mbgraph.c           |  5
-rw-r--r--  vp9/encoder/vp9_rdopt.c             | 65
-rw-r--r--  vp9/encoder/vp9_temporal_filter.c   | 62
5 files changed, 84 insertions(+), 72 deletions(-)
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 880555797..fec5a7c61 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -2125,22 +2125,14 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
mbmi->mb_skip_coeff = 0;
} else {
- vp9_build_1st_inter16x16_predictors_mb(xd,
- xd->dst.y_buffer,
- xd->dst.u_buffer,
- xd->dst.v_buffer,
- xd->dst.y_stride,
- xd->dst.uv_stride);
- if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
- vp9_build_2nd_inter16x16_predictors_mb(xd,
- xd->dst.y_buffer,
- xd->dst.u_buffer,
- xd->dst.v_buffer,
- xd->dst.y_stride,
- xd->dst.uv_stride);
- }
+ vp9_build_inter16x16_predictors_mb(xd,
+ xd->dst.y_buffer,
+ xd->dst.u_buffer,
+ xd->dst.v_buffer,
+ xd->dst.y_stride,
+ xd->dst.uv_stride);
#if CONFIG_COMP_INTERINTRA_PRED
- else if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
+ if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
vp9_build_interintra_16x16_predictors_mb(xd,
xd->dst.y_buffer,
xd->dst.u_buffer,
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index 7a63542c1..a753bf40f 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -713,7 +713,7 @@ void vp9_encode_inter16x16y(MACROBLOCK *x) {
MACROBLOCKD *xd = &x->e_mbd;
BLOCK *b = &x->block[0];
- vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
+ vp9_build_inter16x16_predictors_mby(xd, xd->predictor, 16);
vp9_subtract_mby(x->src_diff, *(b->base_src), xd->predictor, b->src_stride);
diff --git a/vp9/encoder/vp9_mbgraph.c b/vp9/encoder/vp9_mbgraph.c
index 218a47a8e..40823f60c 100644
--- a/vp9/encoder/vp9_mbgraph.c
+++ b/vp9/encoder/vp9_mbgraph.c
@@ -72,7 +72,7 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi,
}
vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv);
- vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
+ vp9_build_inter16x16_predictors_mby(xd, xd->predictor, 16);
best_err = vp9_sad16x16(xd->dst.y_buffer, xd->dst.y_stride,
xd->predictor, 16, INT_MAX);
@@ -292,6 +292,9 @@ static void update_mbgraph_frame_stats
int_mv arf_top_mv, gld_top_mv;
MODE_INFO mi_local;
+ // Make sure the mi context starts in a consistent state.
+ memset(&mi_local, 0, sizeof(mi_local));
+
// Set up limit values for motion vectors to prevent them extending outside the UMV borders
arf_top_mv.as_int = 0;
gld_top_mv.as_int = 0;
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index d65894b5b..6e1122f3e 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -2161,9 +2161,22 @@ static int64_t encode_inter_mb_segment(MACROBLOCK *x,
BLOCK *be = &x->block[i];
int thisdistortion;
- vp9_build_inter_predictors_b(bd, 16, &xd->subpix);
- if (xd->mode_info_context->mbmi.second_ref_frame > 0)
- vp9_build_2nd_inter_predictors_b(bd, 16, &xd->subpix);
+ vp9_build_inter_predictor(*(bd->base_pre) + bd->pre,
+ bd->pre_stride,
+ bd->predictor, 16,
+ &bd->bmi.as_mv[0],
+ &xd->scale_factor[0],
+ 4, 4, 0 /* no avg */, &xd->subpix);
+
+ if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
+ vp9_build_inter_predictor(*(bd->base_second_pre) + bd->pre,
+ bd->pre_stride,
+ bd->predictor, 16,
+ &bd->bmi.as_mv[1],
+ &xd->scale_factor[1],
+ 4, 4, 1 /* avg */, &xd->subpix);
+ }
+
vp9_subtract_b(be, bd, 16);
x->fwd_txm4x4(be->src_diff, be->coeff, 32);
x->quantize_b_4x4(be, bd);
@@ -2205,14 +2218,25 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
int ib = vp9_i8x8_block[i];
if (labels[ib] == which_label) {
+ const int use_second_ref =
+ xd->mode_info_context->mbmi.second_ref_frame > 0;
+ int which_mv;
int idx = (ib & 8) + ((ib & 2) << 1);
BLOCKD *bd = &xd->block[ib], *bd2 = &xd->block[idx];
BLOCK *be = &x->block[ib], *be2 = &x->block[idx];
int thisdistortion;
- vp9_build_inter_predictors4b(xd, bd, 16);
- if (xd->mode_info_context->mbmi.second_ref_frame > 0)
- vp9_build_2nd_inter_predictors4b(xd, bd, 16);
+ for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
+ uint8_t **base_pre = which_mv ? bd->base_second_pre : bd->base_pre;
+
+ vp9_build_inter_predictor(*base_pre + bd->pre,
+ bd->pre_stride,
+ bd->predictor, 16,
+ &bd->bmi.as_mv[which_mv],
+ &xd->scale_factor[which_mv],
+ 8, 8, which_mv, &xd->subpix);
+ }
+
vp9_subtract_4b_c(be, bd, 16);
if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) {
@@ -3528,19 +3552,19 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
unsigned int sse, var;
int tmp_rate_y, tmp_rate_u, tmp_rate_v;
int tmp_dist_y, tmp_dist_u, tmp_dist_v;
- vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
- if (is_comp_pred)
- vp9_build_2nd_inter16x16_predictors_mby(xd, xd->predictor, 16);
+ // TODO(jkoleszar): these 2 y/uv should be replaced with one call to
+ // vp9_build_interintra_16x16_predictors_mb().
+ vp9_build_inter16x16_predictors_mby(xd, xd->predictor, 16);
+
#if CONFIG_COMP_INTERINTRA_PRED
if (is_comp_interintra_pred) {
vp9_build_interintra_16x16_predictors_mby(xd, xd->predictor, 16);
}
#endif
- vp9_build_1st_inter16x16_predictors_mbuv(xd, xd->predictor + 256,
- xd->predictor + 320, 8);
- if (is_comp_pred)
- vp9_build_2nd_inter16x16_predictors_mbuv(xd, xd->predictor + 256,
- xd->predictor + 320, 8);
+
+ vp9_build_inter16x16_predictors_mbuv(xd, xd->predictor + 256,
+ xd->predictor + 320, 8);
+
#if CONFIG_COMP_INTERINTRA_PRED
if (is_comp_interintra_pred) {
vp9_build_interintra_16x16_predictors_mbuv(xd, xd->predictor + 256,
@@ -3644,19 +3668,16 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
xd->dst.y_stride,
xd->dst.uv_stride);
} else {
- vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
- if (is_comp_pred)
- vp9_build_2nd_inter16x16_predictors_mby(xd, xd->predictor, 16);
+ // TODO(jkoleszar): These y/uv fns can be replaced with their mb
+ // equivalent
+ vp9_build_inter16x16_predictors_mby(xd, xd->predictor, 16);
#if CONFIG_COMP_INTERINTRA_PRED
if (is_comp_interintra_pred) {
vp9_build_interintra_16x16_predictors_mby(xd, xd->predictor, 16);
}
#endif
- vp9_build_1st_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
- &xd->predictor[320], 8);
- if (is_comp_pred)
- vp9_build_2nd_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
- &xd->predictor[320], 8);
+ vp9_build_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
+ &xd->predictor[320], 8);
#if CONFIG_COMP_INTERINTRA_PRED
if (is_comp_interintra_pred) {
vp9_build_interintra_16x16_predictors_mbuv(xd, &xd->predictor[256],
diff --git a/vp9/encoder/vp9_temporal_filter.c b/vp9/encoder/vp9_temporal_filter.c
index d016e52cc..39c02e6ad 100644
--- a/vp9/encoder/vp9_temporal_filter.c
+++ b/vp9/encoder/vp9_temporal_filter.c
@@ -12,6 +12,7 @@
#include <limits.h>
#include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_reconinter.h"
#include "vp9/encoder/vp9_onyx_int.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/encoder/vp9_quantize.h"
@@ -42,40 +43,35 @@ static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd,
int mv_row,
int mv_col,
uint8_t *pred) {
- int offset;
- uint8_t *yptr, *uptr, *vptr;
- int omv_row, omv_col;
-
- // Y
- yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3);
-
- xd->subpix.predict[!!(mv_col & 7)][!!(mv_row & 7)][0](
- yptr, stride, &pred[0], 16,
- xd->subpix.filter_x[(mv_col & 7) << 1], xd->subpix.x_step_q4,
- xd->subpix.filter_y[(mv_row & 7) << 1], xd->subpix.y_step_q4,
- 16, 16);
-
- // U & V
- omv_row = mv_row;
- omv_col = mv_col;
- mv_row >>= 1;
- mv_col >>= 1;
+ const int which_mv = 0;
+ int_mv subpel_mv;
+ int_mv fullpel_mv;
+
+ subpel_mv.as_mv.row = mv_row;
+ subpel_mv.as_mv.col = mv_col;
+ // TODO(jkoleszar): Make this rounding consistent with the rest of the code
+ fullpel_mv.as_mv.row = (mv_row >> 1) & ~7;
+ fullpel_mv.as_mv.col = (mv_col >> 1) & ~7;
+
+ vp9_build_inter_predictor(y_mb_ptr, stride,
+ &pred[0], 16,
+ &subpel_mv,
+ &xd->scale_factor[which_mv],
+ 16, 16, which_mv, &xd->subpix);
+
stride = (stride + 1) >> 1;
- offset = (mv_row >> 3) * stride + (mv_col >> 3);
- uptr = u_mb_ptr + offset;
- vptr = v_mb_ptr + offset;
-
- xd->subpix.predict[!!(omv_col & 15)][!!(omv_row & 15)][0](
- uptr, stride, &pred[256], 8,
- xd->subpix.filter_x[(omv_col & 15)], xd->subpix.x_step_q4,
- xd->subpix.filter_y[(omv_row & 15)], xd->subpix.y_step_q4,
- 8, 8);
-
- xd->subpix.predict[!!(omv_col & 15)][!!(omv_row & 15)][0](
- vptr, stride, &pred[320], 8,
- xd->subpix.filter_x[(omv_col & 15)], xd->subpix.x_step_q4,
- xd->subpix.filter_y[(omv_row & 15)], xd->subpix.y_step_q4,
- 8, 8);
+
+ vp9_build_inter_predictor_q4(u_mb_ptr, stride,
+ &pred[256], 8,
+ &fullpel_mv, &subpel_mv,
+ &xd->scale_factor_uv[which_mv],
+ 8, 8, which_mv, &xd->subpix);
+
+ vp9_build_inter_predictor_q4(v_mb_ptr, stride,
+ &pred[320], 8,
+ &fullpel_mv, &subpel_mv,
+ &xd->scale_factor_uv[which_mv],
+ 8, 8, which_mv, &xd->subpix);
}
void vp9_temporal_filter_apply_c(uint8_t *frame1,