Diffstat (limited to 'vp9/common')
-rw-r--r--  vp9/common/vp9_onyxc_int.h  |   1
-rw-r--r--  vp9/common/vp9_reconinter.c |  69
-rw-r--r--  vp9/common/vp9_reconinter.h |   6
-rw-r--r--  vp9/common/vp9_scale.c      | 115
-rw-r--r--  vp9/common/vp9_scale.h      |  28
5 files changed, 130 insertions(+), 89 deletions(-)
diff --git a/vp9/common/vp9_onyxc_int.h b/vp9/common/vp9_onyxc_int.h
index bc6535d61..dc9c3346b 100644
--- a/vp9/common/vp9_onyxc_int.h
+++ b/vp9/common/vp9_onyxc_int.h
@@ -116,6 +116,7 @@ typedef struct VP9Common {
// Each frame can reference ALLOWED_REFS_PER_FRAME buffers
int active_ref_idx[ALLOWED_REFS_PER_FRAME];
struct scale_factors active_ref_scale[ALLOWED_REFS_PER_FRAME];
+ struct scale_factors_common active_ref_scale_comm[ALLOWED_REFS_PER_FRAME];
int new_fb_idx;
YV12_BUFFER_CONFIG post_proc_buffer;
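
For context (not part of the patch): the point of the split is that struct scale_factors becomes cheap to copy per block, since the fixed-point factors, step sizes, and function-pointer table now live once per reference frame in scale_factors_common. A minimal sketch, assuming the post-change headers are on the include path; main() here is purely illustrative:

    #include <stdio.h>
    #include "vp9/common/vp9_scale.h"

    /* Illustrative only: after this change, a per-block copy of struct
     * scale_factors is two ints plus a pointer, while the shared
     * function-pointer state sits once per reference frame. */
    int main(void) {
      printf("per-block scale_factors:        %zu bytes\n",
             sizeof(struct scale_factors));
      printf("per-frame scale_factors_common: %zu bytes\n",
             sizeof(struct scale_factors_common));
      return 0;
    }
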
diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c
index 6f16ac70a..85020e295 100644
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -40,6 +40,24 @@ void vp9_setup_interp_filters(MACROBLOCKD *xd,
assert(((intptr_t)xd->subpix.filter_x & 0xff) == 0);
}
+static void inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const MV32 *mv,
+ const struct scale_factors *scale,
+ int w, int h, int ref,
+ const struct subpix_fn_table *subpix,
+ int xs, int ys) {
+ const int subpel_x = mv->col & SUBPEL_MASK;
+ const int subpel_y = mv->row & SUBPEL_MASK;
+
+ src += (mv->row >> SUBPEL_BITS) * src_stride + (mv->col >> SUBPEL_BITS);
+ scale->sfc->predict[subpel_x != 0][subpel_y != 0][ref](
+ src, src_stride, dst, dst_stride,
+ subpix->filter_x[subpel_x], xs,
+ subpix->filter_y[subpel_y], ys,
+ w, h);
+}
+
void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const MV *src_mv,
@@ -50,16 +68,11 @@ void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
const int is_q4 = precision == MV_PRECISION_Q4;
const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
is_q4 ? src_mv->col : src_mv->col * 2 };
- const MV32 mv = scale->scale_mv(&mv_q4, scale);
- const int subpel_x = mv.col & SUBPEL_MASK;
- const int subpel_y = mv.row & SUBPEL_MASK;
+ const struct scale_factors_common *sfc = scale->sfc;
+ const MV32 mv = sfc->scale_mv(&mv_q4, scale);
- src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
- scale->predict[subpel_x != 0][subpel_y != 0][ref](
- src, src_stride, dst, dst_stride,
- subpix->filter_x[subpel_x], scale->x_step_q4,
- subpix->filter_y[subpel_y], scale->y_step_q4,
- w, h);
+ inter_predictor(src, src_stride, dst, dst_stride, &mv, scale,
+ w, h, ref, subpix, sfc->x_step_q4, sfc->y_step_q4);
}
static INLINE int round_mv_comp_q4(int value) {
@@ -133,10 +146,6 @@ static void build_inter_predictors(int plane, int block, BLOCK_SIZE bsize,
struct scale_factors *const scale = &xd->scale_factor[ref];
struct buf_2d *const pre_buf = &pd->pre[ref];
struct buf_2d *const dst_buf = &pd->dst;
-
- const uint8_t *const pre = pre_buf->buf + scaled_buffer_offset(x, y,
- pre_buf->stride, scale);
-
uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
// TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the
@@ -156,11 +165,29 @@ static void build_inter_predictors(int plane, int block, BLOCK_SIZE bsize,
pd->subsampling_x,
pd->subsampling_y);
- scale->set_scaled_offsets(scale, arg->y + y, arg->x + x);
- vp9_build_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
- &res_mv, scale,
- 4 << pred_w, 4 << pred_h, ref,
- &xd->subpix, MV_PRECISION_Q4);
+ uint8_t *pre;
+ // res_mv is already in Q4 precision (MV_PRECISION_Q4).
+ const MV mv_q4 = { res_mv.row, res_mv.col };
+ MV32 scaled_mv;
+ int xs, ys;
+
+ if (vp9_is_scaled(scale->sfc)) {
+ pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, scale);
+ scale->sfc->set_scaled_offsets(scale, arg->y + y, arg->x + x);
+ scaled_mv = scale->sfc->scale_mv(&mv_q4, scale);
+ xs = scale->sfc->x_step_q4;
+ ys = scale->sfc->y_step_q4;
+ } else {
+ pre = pre_buf->buf + (y * pre_buf->stride + x);
+ scaled_mv.row = mv_q4.row;
+ scaled_mv.col = mv_q4.col;
+ xs = ys = 16;
+ }
+
+ inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
+ &scaled_mv, scale,
+ 4 << pred_w, 4 << pred_h, ref,
+ &xd->subpix, xs, ys);
}
}
@@ -220,15 +247,17 @@ void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
void vp9_setup_scale_factors(VP9_COMMON *cm, int i) {
const int ref = cm->active_ref_idx[i];
struct scale_factors *const sf = &cm->active_ref_scale[i];
+ struct scale_factors_common *const sfc = &cm->active_ref_scale_comm[i];
if (ref >= NUM_YV12_BUFFERS) {
vp9_zero(*sf);
+ vp9_zero(*sfc);
} else {
YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref];
- vp9_setup_scale_factors_for_frame(sf,
+ vp9_setup_scale_factors_for_frame(sf, sfc,
fb->y_crop_width, fb->y_crop_height,
cm->width, cm->height);
- if (vp9_is_scaled(sf))
+ if (vp9_is_scaled(sfc))
vp9_extend_frame_borders(fb, cm->subsampling_x, cm->subsampling_y);
}
}
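
A note on the fixed-point arithmetic above: motion vectors reach inter_predictor in Q4 precision, so the low SUBPEL_BITS (4) bits select the subpel filter phase and the remaining bits form a full-pel offset into the source buffer. The unscaled fast path passes xs == ys == 16, the unit step in Q4, so the convolve kernels advance exactly one source pixel per output pixel. A self-contained worked example (constants restated from vp9_filter.h; the program itself is illustrative):

    #include <stdio.h>

    #define SUBPEL_BITS 4   /* as in vp9/common/vp9_filter.h */
    #define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)

    int main(void) {
      const int mv_col_q4 = 37;                      /* 2 + 5/16 pixels */
      const int fullpel = mv_col_q4 >> SUBPEL_BITS;  /* 2: added to src */
      const int subpel = mv_col_q4 & SUBPEL_MASK;    /* 5: filter phase */
      printf("fullpel=%d subpel=%d\n", fullpel, subpel);
      return 0;
    }
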
diff --git a/vp9/common/vp9_reconinter.h b/vp9/common/vp9_reconinter.h
index 504b79356..610ee4acc 100644
--- a/vp9/common/vp9_reconinter.h
+++ b/vp9/common/vp9_reconinter.h
@@ -38,8 +38,10 @@ void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
static int scaled_buffer_offset(int x_offset, int y_offset, int stride,
const struct scale_factors *scale) {
- const int x = scale ? scale->scale_value_x(x_offset, scale) : x_offset;
- const int y = scale ? scale->scale_value_y(y_offset, scale) : y_offset;
+ const int x = scale ? scale->sfc->scale_value_x(x_offset, scale->sfc) :
+ x_offset;
+ const int y = scale ? scale->sfc->scale_value_y(y_offset, scale->sfc) :
+ y_offset;
return y * stride + x;
}
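
A worked example of the arithmetic behind scaled_buffer_offset, with scaled_x()/scaled_y() restated locally for illustration (REF_SCALE_SHIFT is 14 in vp9_scale.h; the helper name "scaled" is made up for this sketch):

    #include <stdio.h>

    #define REF_SCALE_SHIFT 14  /* as in vp9/common/vp9_scale.h */

    /* Restates scaled_x()/scaled_y() for illustration only. */
    static int scaled(int val, int scale_fp) {
      return val * scale_fp >> REF_SCALE_SHIFT;
    }

    int main(void) {
      const int two_x = 2 << REF_SCALE_SHIFT;  /* reference is 2x larger */
      const int stride = 640;
      /* Block origin (x=8, y=4) lands at (16, 8) in the reference:
       * offset = 8 * 640 + 16 = 5136. A NULL scale passes (x, y)
       * through unchanged. */
      printf("offset=%d\n", scaled(4, two_x) * stride + scaled(8, two_x));
      return 0;
    }
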
diff --git a/vp9/common/vp9_scale.c b/vp9/common/vp9_scale.c
index 989206c60..3f0994f80 100644
--- a/vp9/common/vp9_scale.c
+++ b/vp9/common/vp9_scale.c
@@ -12,23 +12,23 @@
#include "vp9/common/vp9_filter.h"
#include "vp9/common/vp9_scale.h"
-static INLINE int scaled_x(int val, const struct scale_factors *scale) {
- return val * scale->x_scale_fp >> REF_SCALE_SHIFT;
+static INLINE int scaled_x(int val, const struct scale_factors_common *sfc) {
+ return val * sfc->x_scale_fp >> REF_SCALE_SHIFT;
}
-static INLINE int scaled_y(int val, const struct scale_factors *scale) {
- return val * scale->y_scale_fp >> REF_SCALE_SHIFT;
+static INLINE int scaled_y(int val, const struct scale_factors_common *sfc) {
+ return val * sfc->y_scale_fp >> REF_SCALE_SHIFT;
}
-static int unscaled_value(int val, const struct scale_factors *scale) {
- (void) scale;
+static int unscaled_value(int val, const struct scale_factors_common *sfc) {
+ (void) sfc;
return val;
}
static MV32 scaled_mv(const MV *mv, const struct scale_factors *scale) {
const MV32 res = {
- scaled_y(mv->row, scale) + scale->y_offset_q4,
- scaled_x(mv->col, scale) + scale->x_offset_q4
+ scaled_y(mv->row, scale->sfc) + scale->y_offset_q4,
+ scaled_x(mv->col, scale->sfc) + scale->x_offset_q4
};
return res;
}
@@ -43,8 +43,8 @@ static MV32 unscaled_mv(const MV *mv, const struct scale_factors *scale) {
static void set_offsets_with_scaling(struct scale_factors *scale,
int row, int col) {
- scale->x_offset_q4 = scaled_x(col << SUBPEL_BITS, scale) & SUBPEL_MASK;
- scale->y_offset_q4 = scaled_y(row << SUBPEL_BITS, scale) & SUBPEL_MASK;
+ scale->x_offset_q4 = scaled_x(col << SUBPEL_BITS, scale->sfc) & SUBPEL_MASK;
+ scale->y_offset_q4 = scaled_y(row << SUBPEL_BITS, scale->sfc) & SUBPEL_MASK;
}
static void set_offsets_without_scaling(struct scale_factors *scale,
@@ -70,31 +70,30 @@ static int check_scale_factors(int other_w, int other_h,
}
void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
+ struct scale_factors_common *scale_comm,
int other_w, int other_h,
int this_w, int this_h) {
if (!check_scale_factors(other_w, other_h, this_w, this_h)) {
- scale->x_scale_fp = REF_INVALID_SCALE;
- scale->y_scale_fp = REF_INVALID_SCALE;
+ scale_comm->x_scale_fp = REF_INVALID_SCALE;
+ scale_comm->y_scale_fp = REF_INVALID_SCALE;
return;
}
- scale->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w);
- scale->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h);
- scale->x_step_q4 = scaled_x(16, scale);
- scale->y_step_q4 = scaled_y(16, scale);
- scale->x_offset_q4 = 0; // calculated per block
- scale->y_offset_q4 = 0; // calculated per block
+ scale_comm->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w);
+ scale_comm->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h);
+ scale_comm->x_step_q4 = scaled_x(16, scale_comm);
+ scale_comm->y_step_q4 = scaled_y(16, scale_comm);
- if (vp9_is_scaled(scale)) {
- scale->scale_value_x = scaled_x;
- scale->scale_value_y = scaled_y;
- scale->set_scaled_offsets = set_offsets_with_scaling;
- scale->scale_mv = scaled_mv;
+ if (vp9_is_scaled(scale_comm)) {
+ scale_comm->scale_value_x = scaled_x;
+ scale_comm->scale_value_y = scaled_y;
+ scale_comm->set_scaled_offsets = set_offsets_with_scaling;
+ scale_comm->scale_mv = scaled_mv;
} else {
- scale->scale_value_x = unscaled_value;
- scale->scale_value_y = unscaled_value;
- scale->set_scaled_offsets = set_offsets_without_scaling;
- scale->scale_mv = unscaled_mv;
+ scale_comm->scale_value_x = unscaled_value;
+ scale_comm->scale_value_y = unscaled_value;
+ scale_comm->set_scaled_offsets = set_offsets_without_scaling;
+ scale_comm->scale_mv = unscaled_mv;
}
// TODO(agrange): Investigate the best choice of functions to use here
@@ -103,44 +102,48 @@ void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
// applied in one direction only, and not at all for 0,0, seems to give the
// best quality, but it may be worth trying an additional mode that does
// do the filtering on full-pel.
- if (scale->x_step_q4 == 16) {
- if (scale->y_step_q4 == 16) {
+ if (scale_comm->x_step_q4 == 16) {
+ if (scale_comm->y_step_q4 == 16) {
// No scaling in either direction.
- scale->predict[0][0][0] = vp9_convolve_copy;
- scale->predict[0][0][1] = vp9_convolve_avg;
- scale->predict[0][1][0] = vp9_convolve8_vert;
- scale->predict[0][1][1] = vp9_convolve8_avg_vert;
- scale->predict[1][0][0] = vp9_convolve8_horiz;
- scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
+ scale_comm->predict[0][0][0] = vp9_convolve_copy;
+ scale_comm->predict[0][0][1] = vp9_convolve_avg;
+ scale_comm->predict[0][1][0] = vp9_convolve8_vert;
+ scale_comm->predict[0][1][1] = vp9_convolve8_avg_vert;
+ scale_comm->predict[1][0][0] = vp9_convolve8_horiz;
+ scale_comm->predict[1][0][1] = vp9_convolve8_avg_horiz;
} else {
// No scaling in x direction. Must always scale in the y direction.
- scale->predict[0][0][0] = vp9_convolve8_vert;
- scale->predict[0][0][1] = vp9_convolve8_avg_vert;
- scale->predict[0][1][0] = vp9_convolve8_vert;
- scale->predict[0][1][1] = vp9_convolve8_avg_vert;
- scale->predict[1][0][0] = vp9_convolve8;
- scale->predict[1][0][1] = vp9_convolve8_avg;
+ scale_comm->predict[0][0][0] = vp9_convolve8_vert;
+ scale_comm->predict[0][0][1] = vp9_convolve8_avg_vert;
+ scale_comm->predict[0][1][0] = vp9_convolve8_vert;
+ scale_comm->predict[0][1][1] = vp9_convolve8_avg_vert;
+ scale_comm->predict[1][0][0] = vp9_convolve8;
+ scale_comm->predict[1][0][1] = vp9_convolve8_avg;
}
} else {
- if (scale->y_step_q4 == 16) {
+ if (scale_comm->y_step_q4 == 16) {
// No scaling in the y direction. Must always scale in the x direction.
- scale->predict[0][0][0] = vp9_convolve8_horiz;
- scale->predict[0][0][1] = vp9_convolve8_avg_horiz;
- scale->predict[0][1][0] = vp9_convolve8;
- scale->predict[0][1][1] = vp9_convolve8_avg;
- scale->predict[1][0][0] = vp9_convolve8_horiz;
- scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
+ scale_comm->predict[0][0][0] = vp9_convolve8_horiz;
+ scale_comm->predict[0][0][1] = vp9_convolve8_avg_horiz;
+ scale_comm->predict[0][1][0] = vp9_convolve8;
+ scale_comm->predict[0][1][1] = vp9_convolve8_avg;
+ scale_comm->predict[1][0][0] = vp9_convolve8_horiz;
+ scale_comm->predict[1][0][1] = vp9_convolve8_avg_horiz;
} else {
// Must always scale in both directions.
- scale->predict[0][0][0] = vp9_convolve8;
- scale->predict[0][0][1] = vp9_convolve8_avg;
- scale->predict[0][1][0] = vp9_convolve8;
- scale->predict[0][1][1] = vp9_convolve8_avg;
- scale->predict[1][0][0] = vp9_convolve8;
- scale->predict[1][0][1] = vp9_convolve8_avg;
+ scale_comm->predict[0][0][0] = vp9_convolve8;
+ scale_comm->predict[0][0][1] = vp9_convolve8_avg;
+ scale_comm->predict[0][1][0] = vp9_convolve8;
+ scale_comm->predict[0][1][1] = vp9_convolve8_avg;
+ scale_comm->predict[1][0][0] = vp9_convolve8;
+ scale_comm->predict[1][0][1] = vp9_convolve8_avg;
}
}
// 2D subpel motion always gets filtered in both directions
- scale->predict[1][1][0] = vp9_convolve8;
- scale->predict[1][1][1] = vp9_convolve8_avg;
+ scale_comm->predict[1][1][0] = vp9_convolve8;
+ scale_comm->predict[1][1][1] = vp9_convolve8_avg;
+
+ scale->sfc = scale_comm;
+ scale->x_offset_q4 = 0; // calculated per block
+ scale->y_offset_q4 = 0; // calculated per block
}
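
How the predict[2][2][2] table filled in above is consumed: the first index says whether the MV has a horizontal subpel component, the second whether it has a vertical one, and the third selects the averaging variant used for the second reference of a compound block, so an unscaled zero-subpel single-ref lookup resolves to vp9_convolve_copy. A sketch of that lookup, mirroring inter_predictor in vp9_reconinter.c (pick_predictor is a hypothetical helper, and the include paths are assumed):

    #include "vp9/common/vp9_convolve.h"  /* convolve_fn_t (assumed path) */
    #include "vp9/common/vp9_filter.h"    /* SUBPEL_MASK */
    #include "vp9/common/vp9_scale.h"

    /* Sketch: resolve the convolve function for one block. */
    static convolve_fn_t pick_predictor(
        const struct scale_factors_common *sfc, const MV32 *mv, int ref) {
      const int subpel_x = mv->col & SUBPEL_MASK;
      const int subpel_y = mv->row & SUBPEL_MASK;
      return sfc->predict[subpel_x != 0][subpel_y != 0][ref];
    }
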
diff --git a/vp9/common/vp9_scale.h b/vp9/common/vp9_scale.h
index ece011477..1437fcd9c 100644
--- a/vp9/common/vp9_scale.h
+++ b/vp9/common/vp9_scale.h
@@ -18,34 +18,40 @@
#define REF_NO_SCALE (1 << REF_SCALE_SHIFT)
#define REF_INVALID_SCALE -1
-struct scale_factors {
+struct scale_factors;
+struct scale_factors_common {
int x_scale_fp; // horizontal fixed point scale factor
int y_scale_fp; // vertical fixed point scale factor
- int x_offset_q4;
int x_step_q4;
- int y_offset_q4;
int y_step_q4;
- int (*scale_value_x)(int val, const struct scale_factors *scale);
- int (*scale_value_y)(int val, const struct scale_factors *scale);
+ int (*scale_value_x)(int val, const struct scale_factors_common *sfc);
+ int (*scale_value_y)(int val, const struct scale_factors_common *sfc);
void (*set_scaled_offsets)(struct scale_factors *scale, int row, int col);
MV32 (*scale_mv)(const MV *mv, const struct scale_factors *scale);
convolve_fn_t predict[2][2][2]; // horiz, vert, avg
};
+struct scale_factors {
+ int x_offset_q4;
+ int y_offset_q4;
+ const struct scale_factors_common *sfc;
+};
+
void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
+ struct scale_factors_common *scale_comm,
int other_w, int other_h,
int this_w, int this_h);
-static int vp9_is_valid_scale(const struct scale_factors *sf) {
- return sf->x_scale_fp != REF_INVALID_SCALE &&
- sf->y_scale_fp != REF_INVALID_SCALE;
+static int vp9_is_valid_scale(const struct scale_factors_common *sfc) {
+ return sfc->x_scale_fp != REF_INVALID_SCALE &&
+ sfc->y_scale_fp != REF_INVALID_SCALE;
}
-static int vp9_is_scaled(const struct scale_factors *sf) {
- return sf->x_scale_fp != REF_NO_SCALE ||
- sf->y_scale_fp != REF_NO_SCALE;
+static int vp9_is_scaled(const struct scale_factors_common *sfc) {
+ return sfc->x_scale_fp != REF_NO_SCALE ||
+ sfc->y_scale_fp != REF_NO_SCALE;
}
#endif // VP9_COMMON_VP9_SCALE_H_
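
Usage sketch for the revised setup API: callers now provide both halves, as vp9_setup_scale_factors() in vp9_reconinter.c does with cm->active_ref_scale and cm->active_ref_scale_comm. A minimal standalone example (example_setup and the frame sizes are illustrative):

    #include "vp9/common/vp9_scale.h"

    static void example_setup(void) {
      struct scale_factors_common sfc;  /* shared, one per reference   */
      struct scale_factors sf;          /* per-block offsets + pointer */

      /* 1920x1080 reference predicting a 960x540 frame: 2x per axis,
       * the largest downscale check_scale_factors() accepts. */
      vp9_setup_scale_factors_for_frame(&sf, &sfc, 1920, 1080, 960, 540);

      if (vp9_is_valid_scale(&sfc) && vp9_is_scaled(&sfc)) {
        /* sfc.x_step_q4 == sfc.y_step_q4 == 32 here: each output pixel
         * advances the source by two full pixels (16 means unscaled). */
      }
    }

Note that on an invalid scale the setup function returns before wiring sf.sfc, so the validity check must query the common struct directly, as above.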