Diffstat (limited to 'vp9/common')
-rw-r--r--  vp9/common/vp9_blockd.h              6
-rw-r--r--  vp9/common/vp9_convolve.c           56
-rw-r--r--  vp9/common/vp9_convolve.h            3
-rw-r--r--  vp9/common/vp9_filter.c             15
-rw-r--r--  vp9/common/vp9_idct.h               14
-rw-r--r--  vp9/common/vp9_idctllm.c             7
-rw-r--r--  vp9/common/vp9_invtrans.c            2
-rw-r--r--  vp9/common/vp9_mbpitch.c            11
-rw-r--r--  vp9/common/vp9_onyxc_int.h           9
-rw-r--r--  vp9/common/vp9_reconinter.c        345
-rw-r--r--  vp9/common/vp9_reconinter.h        144
-rw-r--r--  vp9/common/vp9_reconintra.h         64
-rw-r--r--  vp9/common/vp9_rtcd.c                5
-rw-r--r--  vp9/common/vp9_rtcd_defs.sh          2
-rw-r--r--  vp9/common/vp9_setupintrarecon.h     2
-rw-r--r--  vp9/common/x86/vp9_idctllm_x86.c    76
16 files changed, 544 insertions, 217 deletions
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index 5d876c15b..372a58ec4 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -276,8 +276,6 @@ typedef struct blockd {
int dst;
int dst_stride;
- int eob;
-
union b_mode_info bmi;
} BLOCKD;
@@ -292,9 +290,12 @@ struct scale_factors {
int x_num;
int x_den;
int x_offset_q4;
+ int x_step_q4;
int y_num;
int y_den;
int y_offset_q4;
+ int y_step_q4;
+ convolve_fn_t predict[2][2][2]; // horiz, vert, avg
};
typedef struct macroblockd {
@@ -302,6 +303,7 @@ typedef struct macroblockd {
DECLARE_ALIGNED(16, uint8_t, predictor[384]);
DECLARE_ALIGNED(16, int16_t, qcoeff[384]);
DECLARE_ALIGNED(16, int16_t, dqcoeff[384]);
+ DECLARE_ALIGNED(16, uint16_t, eobs[24]);
SUPERBLOCKD sb_coeff_data;
diff --git a/vp9/common/vp9_convolve.c b/vp9/common/vp9_convolve.c
index ac5d5cb3e..b062e7dc7 100644
--- a/vp9/common/vp9_convolve.c
+++ b/vp9/common/vp9_convolve.c
@@ -19,7 +19,6 @@
#define VP9_FILTER_WEIGHT 128
#define VP9_FILTER_SHIFT 7
-#define ALIGN_FILTERS_256 0
/* Assume a bank of 16 filters to choose from. There are two implementations
* for filter wrapping behavior, since we want to be able to pick which filter
@@ -34,8 +33,11 @@
* always 256 byte aligned.
*
* Implementations 2 and 3 are likely preferable, as they avoid an extra 2
- * parameters, and switching between them is trivial.
+ * parameters, and switching between them is trivial via the
+ * ALIGN_FILTERS_256 macro below.
*/
+#define ALIGN_FILTERS_256 1
+
static void convolve_horiz_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x0, int x_step_q4,
@@ -56,11 +58,12 @@ static void convolve_horiz_c(const uint8_t *src, int src_stride,
const int16_t *filter_x = filter_x0;
/* Initial phase offset */
- int x_q4 = (filter_x - filter_x_base) / taps;
+ int x0_q4 = (filter_x - filter_x_base) / taps;
+ int x_q4 = x0_q4;
for (x = 0; x < w; ++x) {
/* Per-pixel src offset */
- int src_x = x_q4 >> 4;
+ int src_x = (x_q4 - x0_q4) >> 4;
for (sum = 0, k = 0; k < taps; ++k) {
sum += src[src_x + k] * filter_x[k];
@@ -97,11 +100,12 @@ static void convolve_avg_horiz_c(const uint8_t *src, int src_stride,
const int16_t *filter_x = filter_x0;
/* Initial phase offset */
- int x_q4 = (filter_x - filter_x_base) / taps;
+ int x0_q4 = (filter_x - filter_x_base) / taps;
+ int x_q4 = x0_q4;
for (x = 0; x < w; ++x) {
/* Per-pixel src offset */
- int src_x = x_q4 >> 4;
+ int src_x = (x_q4 - x0_q4) >> 4;
for (sum = 0, k = 0; k < taps; ++k) {
sum += src[src_x + k] * filter_x[k];
@@ -138,11 +142,12 @@ static void convolve_vert_c(const uint8_t *src, int src_stride,
const int16_t *filter_y = filter_y0;
/* Initial phase offset */
- int y_q4 = (filter_y - filter_y_base) / taps;
+ int y0_q4 = (filter_y - filter_y_base) / taps;
+ int y_q4 = y0_q4;
for (y = 0; y < h; ++y) {
/* Per-pixel src offset */
- int src_y = y_q4 >> 4;
+ int src_y = (y_q4 - y0_q4) >> 4;
for (sum = 0, k = 0; k < taps; ++k) {
sum += src[(src_y + k) * src_stride] * filter_y[k];
@@ -179,11 +184,12 @@ static void convolve_avg_vert_c(const uint8_t *src, int src_stride,
const int16_t *filter_y = filter_y0;
/* Initial phase offset */
- int y_q4 = (filter_y - filter_y_base) / taps;
+ int y0_q4 = (filter_y - filter_y_base) / taps;
+ int y_q4 = y0_q4;
for (y = 0; y < h; ++y) {
/* Per-pixel src offset */
- int src_y = y_q4 >> 4;
+ int src_y = (y_q4 - y0_q4) >> 4;
for (sum = 0, k = 0; k < taps; ++k) {
sum += src[(src_y + k) * src_stride] * filter_y[k];
@@ -206,16 +212,25 @@ static void convolve_c(const uint8_t *src, int src_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h, int taps) {
- /* Fixed size intermediate buffer places limits on parameters. */
- uint8_t temp[16 * 23];
+ /* Fixed size intermediate buffer places limits on parameters.
+ * Maximum intermediate_height is 39, for y_step_q4 == 32,
+ * h == 16, taps == 8.
+ */
+ uint8_t temp[16 * 39];
+ int intermediate_height = ((h * y_step_q4) >> 4) + taps - 1;
+
assert(w <= 16);
assert(h <= 16);
assert(taps <= 8);
+ assert(y_step_q4 <= 32);
+
+ if (intermediate_height < h)
+ intermediate_height = h;
convolve_horiz_c(src - src_stride * (taps / 2 - 1), src_stride,
temp, 16,
filter_x, x_step_q4, filter_y, y_step_q4,
- w, h + taps - 1, taps);
+ w, intermediate_height, taps);
convolve_vert_c(temp + 16 * (taps / 2 - 1), 16, dst, dst_stride,
filter_x, x_step_q4, filter_y, y_step_q4,
w, h, taps);
@@ -226,16 +241,25 @@ static void convolve_avg_c(const uint8_t *src, int src_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h, int taps) {
- /* Fixed size intermediate buffer places limits on parameters. */
- uint8_t temp[16 * 23];
+ /* Fixed size intermediate buffer places limits on parameters.
+ * Maximum intermediate_height is 39, for y_step_q4 == 32,
+ * h == 16, taps == 8.
+ */
+ uint8_t temp[16 * 39];
+ int intermediate_height = ((h * y_step_q4) >> 4) + taps - 1;
+
assert(w <= 16);
assert(h <= 16);
assert(taps <= 8);
+ assert(y_step_q4 <= 32);
+
+ if (intermediate_height < h)
+ intermediate_height = h;
convolve_horiz_c(src - src_stride * (taps / 2 - 1), src_stride,
temp, 16,
filter_x, x_step_q4, filter_y, y_step_q4,
- w, h + taps - 1, taps);
+ w, intermediate_height, taps);
convolve_avg_vert_c(temp + 16 * (taps / 2 - 1), 16, dst, dst_stride,
filter_x, x_step_q4, filter_y, y_step_q4,
w, h, taps);
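
The enlarged buffer follows directly from the two-pass structure: the vertical
pass consumes ((h * y_step_q4) >> 4) + taps - 1 intermediate rows, clamped to
at least h when the step is small. A standalone check of that bound (not part
of the patch; the helper name is illustrative):

#include <assert.h>

/* Rows the vertical pass reads from the intermediate buffer, with the
 * same clamp the patch applies for small y_step_q4. */
static int intermediate_rows(int h, int y_step_q4, int taps) {
  int rows = ((h * y_step_q4) >> 4) + taps - 1;
  return rows < h ? h : rows;
}

int main(void) {
  assert(intermediate_rows(16, 32, 8) == 39);  /* worst case: temp[16 * 39] */
  assert(intermediate_rows(16, 16, 8) == 23);  /* unscaled: old temp[16 * 23] */
  return 0;
}
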
diff --git a/vp9/common/vp9_convolve.h b/vp9/common/vp9_convolve.h
index 46c935ab7..8c4856187 100644
--- a/vp9/common/vp9_convolve.h
+++ b/vp9/common/vp9_convolve.h
@@ -33,11 +33,8 @@ void vp9_convolve_avg(const uint8_t *src, int src_stride,
int w, int h);
struct subpix_fn_table {
- convolve_fn_t predict[2][2][2]; // horiz, vert, avg
const int16_t (*filter_x)[8];
const int16_t (*filter_y)[8];
- int x_step_q4;
- int y_step_q4;
};
#endif // VP9_COMMON_CONVOLVE_H_
diff --git a/vp9/common/vp9_filter.c b/vp9/common/vp9_filter.c
index 5e425895f..434c63e7e 100644
--- a/vp9/common/vp9_filter.c
+++ b/vp9/common/vp9_filter.c
@@ -15,7 +15,7 @@
#include "vp9_rtcd.h"
#include "vp9/common/vp9_common.h"
-DECLARE_ALIGNED(16, const int16_t, vp9_bilinear_filters[SUBPEL_SHIFTS][8]) = {
+DECLARE_ALIGNED(256, const int16_t, vp9_bilinear_filters[SUBPEL_SHIFTS][8]) = {
{ 0, 0, 0, 128, 0, 0, 0, 0 },
{ 0, 0, 0, 120, 8, 0, 0, 0 },
{ 0, 0, 0, 112, 16, 0, 0, 0 },
@@ -36,7 +36,8 @@ DECLARE_ALIGNED(16, const int16_t, vp9_bilinear_filters[SUBPEL_SHIFTS][8]) = {
#define FILTER_ALPHA 0
#define FILTER_ALPHA_SHARP 1
-DECLARE_ALIGNED(16, const int16_t, vp9_sub_pel_filters_8[SUBPEL_SHIFTS][8]) = {
+DECLARE_ALIGNED(256, const int16_t, vp9_sub_pel_filters_8[SUBPEL_SHIFTS][8])
+ = {
#if FILTER_ALPHA == 0
/* Lagrangian interpolation filter */
{ 0, 0, 0, 128, 0, 0, 0, 0},
@@ -55,6 +56,7 @@ DECLARE_ALIGNED(16, const int16_t, vp9_sub_pel_filters_8[SUBPEL_SHIFTS][8]) = {
{ -1, 3, -9, 27, 118, -13, 4, -1},
{ 0, 2, -6, 18, 122, -10, 3, -1},
{ 0, 1, -3, 8, 126, -5, 1, 0}
+
#elif FILTER_ALPHA == 50
/* Generated using MATLAB:
* alpha = 0.5;
@@ -82,7 +84,8 @@ DECLARE_ALIGNED(16, const int16_t, vp9_sub_pel_filters_8[SUBPEL_SHIFTS][8]) = {
#endif /* FILTER_ALPHA */
};
-DECLARE_ALIGNED(16, const int16_t, vp9_sub_pel_filters_8s[SUBPEL_SHIFTS][8]) = {
+DECLARE_ALIGNED(256, const int16_t, vp9_sub_pel_filters_8s[SUBPEL_SHIFTS][8])
+ = {
#if FILTER_ALPHA_SHARP == 1
/* dct based filter */
{0, 0, 0, 128, 0, 0, 0, 0},
@@ -101,6 +104,7 @@ DECLARE_ALIGNED(16, const int16_t, vp9_sub_pel_filters_8s[SUBPEL_SHIFTS][8]) = {
{-2, 5, -10, 27, 121, -17, 7, -3},
{-1, 3, -6, 17, 125, -13, 5, -2},
{0, 1, -3, 8, 127, -7, 3, -1}
+
#elif FILTER_ALPHA_SHARP == 75
/* alpha = 0.75 */
{0, 0, 0, 128, 0, 0, 0, 0},
@@ -122,7 +126,7 @@ DECLARE_ALIGNED(16, const int16_t, vp9_sub_pel_filters_8s[SUBPEL_SHIFTS][8]) = {
#endif /* FILTER_ALPHA_SHARP */
};
-DECLARE_ALIGNED(16, const int16_t,
+DECLARE_ALIGNED(256, const int16_t,
vp9_sub_pel_filters_8lp[SUBPEL_SHIFTS][8]) = {
/* 8-tap lowpass filter */
/* Hamming window */
@@ -144,7 +148,8 @@ DECLARE_ALIGNED(16, const int16_t,
{ 1, -2, -7, 37, 80, 28, -8, -1}
};
-DECLARE_ALIGNED(16, const int16_t, vp9_sub_pel_filters_6[SUBPEL_SHIFTS][8]) = {
+DECLARE_ALIGNED(256, const int16_t, vp9_sub_pel_filters_6[SUBPEL_SHIFTS][8])
+ = {
{0, 0, 0, 128, 0, 0, 0, 0},
{0, 1, -5, 125, 8, -2, 1, 0},
{0, 1, -8, 122, 17, -5, 1, 0},
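
The bump from 16- to 256-byte alignment is what makes ALIGN_FILTERS_256
workable: a bank is SUBPEL_SHIFTS (16) filters of 8 int16_t taps, exactly 256
bytes, so the bank base can be recovered from any in-bank filter pointer by
masking off the low 8 bits. This is how the convolve code derives
filter_x_base, and what the new assert on xd->subpix.filter_x in
vp9_reconinter.c checks. A standalone sketch (GCC/Clang alignment syntax here;
libvpx itself uses DECLARE_ALIGNED):

#include <assert.h>
#include <stdint.h>

/* A 256-byte-aligned bank of 16 filters x 8 taps x sizeof(int16_t) spans
 * exactly 256 bytes, so masking any in-bank pointer yields the base. */
static int16_t bank[16][8] __attribute__((aligned(256)));

static int initial_phase_q4(const int16_t *filter) {
  const int16_t *base = (const int16_t *)((intptr_t)filter & ~(intptr_t)0xff);
  return (int)(filter - base) / 8;  /* subpel phase, 0..15 */
}

int main(void) {
  assert(initial_phase_q4(bank[0]) == 0);
  assert(initial_phase_q4(bank[11]) == 11);
  return 0;
}
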
diff --git a/vp9/common/vp9_idct.h b/vp9/common/vp9_idct.h
index 430cec083..3e0ee4b63 100644
--- a/vp9/common/vp9_idct.h
+++ b/vp9/common/vp9_idct.h
@@ -13,6 +13,13 @@
#include "./vpx_config.h"
+#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))
+
+/* If we don't want to use ROUND_POWER_OF_TWO macro
+static INLINE int16_t round_power_of_two(int16_t value, int n) {
+ return (value + (1 << (n - 1))) >> n;
+}*/
+
// Constants and Macros used by all idct/dct functions
#define DCT_CONST_BITS 14
#define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))
@@ -61,4 +68,11 @@ static INLINE int dct_const_round_shift(int input) {
assert(INT16_MIN <= rv && rv <= INT16_MAX);
return rv;
}
+
+static INLINE int dct_32_round(int input) {
+ int rv = (input + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
+ assert(-131072 <= rv && rv <= 131071);
+ return rv;
+}
+
#endif
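
For reference, ROUND_POWER_OF_TWO(value, n) rounds value / 2^n to nearest
(ties upward for the non-negative inputs used here), and the assert range in
dct_32_round is the signed 18-bit interval needed by the larger-transform
intermediates. A quick standalone check (illustrative only):

#include <assert.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

int main(void) {
  assert(ROUND_POWER_OF_TWO(15, 4) == 1);  /* (15 + 8) >> 4 */
  assert(ROUND_POWER_OF_TWO(7, 4) == 0);   /* (7 + 8) >> 4  */
  assert(-131072 == -(1 << 17));           /* dct_32_round lower bound */
  assert(131071 == (1 << 17) - 1);         /* dct_32_round upper bound */
  return 0;
}
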
diff --git a/vp9/common/vp9_idctllm.c b/vp9/common/vp9_idctllm.c
index 632dae8fd..f34823b36 100644
--- a/vp9/common/vp9_idctllm.c
+++ b/vp9/common/vp9_idctllm.c
@@ -31,13 +31,6 @@
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_idct.h"
-#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))
-
-/* If we don't want to use ROUND_POWER_OF_TWO macro
-static INLINE int16_t round_power_of_two(int16_t value, int n) {
- return (value + (1 << (n - 1))) >> n;
-}*/
-
typedef void (*transform_1d)(int16_t*, int16_t*);
typedef struct {
diff --git a/vp9/common/vp9_invtrans.c b/vp9/common/vp9_invtrans.c
index d431ea24b..1311b9111 100644
--- a/vp9/common/vp9_invtrans.c
+++ b/vp9/common/vp9_invtrans.c
@@ -13,7 +13,7 @@
void vp9_inverse_transform_b_4x4(MACROBLOCKD *xd, int block, int pitch) {
BLOCKD *b = &xd->block[block];
- if (b->eob <= 1)
+ if (xd->eobs[block] <= 1)
xd->inv_txm4x4_1(b->dqcoeff, b->diff, pitch);
else
xd->inv_txm4x4(b->dqcoeff, b->diff, pitch);
diff --git a/vp9/common/vp9_mbpitch.c b/vp9/common/vp9_mbpitch.c
index b3303eb59..ed96292a4 100644
--- a/vp9/common/vp9_mbpitch.c
+++ b/vp9/common/vp9_mbpitch.c
@@ -71,17 +71,6 @@ static void setup_macroblock(MACROBLOCKD *xd, BLOCKSET bs) {
setup_block(&blockd[block + 4], stride, v, v2, stride,
((block - 16) >> 1) * 4 * stride + (block & 1) * 4, bs);
}
-
- // TODO(jkoleszar): this will move once we're actually scaling.
- xd->scale_factor[0].x_num = 1;
- xd->scale_factor[0].x_den = 1;
- xd->scale_factor[0].y_num = 1;
- xd->scale_factor[0].y_den = 1;
- xd->scale_factor[0].x_offset_q4 = 0;
- xd->scale_factor[0].y_offset_q4 = 0;
- xd->scale_factor[1]= xd->scale_factor[0];
- xd->scale_factor_uv[0] = xd->scale_factor[0];
- xd->scale_factor_uv[1] = xd->scale_factor[1];
}
void vp9_setup_block_dptrs(MACROBLOCKD *xd) {
diff --git a/vp9/common/vp9_onyxc_int.h b/vp9/common/vp9_onyxc_int.h
index e952fe933..c4bb12340 100644
--- a/vp9/common/vp9_onyxc_int.h
+++ b/vp9/common/vp9_onyxc_int.h
@@ -39,7 +39,11 @@ void vp9_initialize_common(void);
#define NUM_REF_FRAMES 3
#define NUM_REF_FRAMES_LG2 2
-#define NUM_YV12_BUFFERS (NUM_REF_FRAMES + 1)
+
+// 1 scratch frame for the new frame, 3 for scaled references on the encoder
+// TODO(jkoleszar): These 3 extra references could probably come from the
+// normal reference pool.
+#define NUM_YV12_BUFFERS (NUM_REF_FRAMES + 4)
#define NUM_FRAME_CONTEXTS_LG2 2
#define NUM_FRAME_CONTEXTS (1 << NUM_FRAME_CONTEXTS_LG2)
@@ -128,6 +132,8 @@ typedef struct VP9Common {
int Width;
int Height;
+ int last_width;
+ int last_height;
int horiz_scale;
int vert_scale;
@@ -145,6 +151,7 @@ typedef struct VP9Common {
*/
int active_ref_idx[3]; /* each frame can reference 3 buffers */
int new_fb_idx;
+ struct scale_factors active_ref_scale[3];
YV12_BUFFER_CONFIG post_proc_buffer;
YV12_BUFFER_CONFIG temp_scale_frame;
diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c
index 3b4b34216..30e8951af 100644
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -17,26 +17,97 @@
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
-void vp9_setup_interp_filters(MACROBLOCKD *xd,
- INTERPOLATIONFILTERTYPE mcomp_filter_type,
- VP9_COMMON *cm) {
+void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
+ YV12_BUFFER_CONFIG *other,
+ int this_w, int this_h) {
+ int other_w, other_h;
+
+ other_h = other->y_height;
+ other_w = other->y_width;
+ scale->x_num = other_w;
+ scale->x_den = this_w;
+ scale->x_offset_q4 = 0; // calculated per-mb
+ scale->x_step_q4 = 16 * other_w / this_w;
+ scale->y_num = other_h;
+ scale->y_den = this_h;
+ scale->y_offset_q4 = 0; // calculated per-mb
+ scale->y_step_q4 = 16 * other_h / this_h;
+
// TODO(agrange): Investigate the best choice of functions to use here
// for EIGHTTAP_SMOOTH. Since it is not interpolating, need to choose what
// to do at full-pel offsets. The current selection, where the filter is
// applied in one direction only, and not at all for 0,0, seems to give the
// best quality, but it may be worth trying an additional mode that does
// do the filtering on full-pel.
- xd->subpix.predict[0][0][0] = vp9_convolve_copy;
- xd->subpix.predict[0][0][1] = vp9_convolve_avg;
- xd->subpix.predict[0][1][0] = vp9_convolve8_vert;
- xd->subpix.predict[0][1][1] = vp9_convolve8_avg_vert;
- xd->subpix.predict[1][0][0] = vp9_convolve8_horiz;
- xd->subpix.predict[1][0][1] = vp9_convolve8_avg_horiz;
- xd->subpix.predict[1][1][0] = vp9_convolve8;
- xd->subpix.predict[1][1][1] = vp9_convolve8_avg;
-
- xd->subpix.x_step_q4 = 16;
- xd->subpix.y_step_q4 = 16;
+ if (scale->x_step_q4 == 16) {
+ if (scale->y_step_q4 == 16) {
+ // No scaling in either direction.
+ scale->predict[0][0][0] = vp9_convolve_copy;
+ scale->predict[0][0][1] = vp9_convolve_avg;
+ scale->predict[0][1][0] = vp9_convolve8_vert;
+ scale->predict[0][1][1] = vp9_convolve8_avg_vert;
+ scale->predict[1][0][0] = vp9_convolve8_horiz;
+ scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
+ } else {
+ // No scaling in x direction. Must always scale in the y direction.
+ scale->predict[0][0][0] = vp9_convolve8_vert;
+ scale->predict[0][0][1] = vp9_convolve8_avg_vert;
+ scale->predict[0][1][0] = vp9_convolve8_vert;
+ scale->predict[0][1][1] = vp9_convolve8_avg_vert;
+ scale->predict[1][0][0] = vp9_convolve8;
+ scale->predict[1][0][1] = vp9_convolve8_avg;
+ }
+ } else {
+ if (scale->y_step_q4 == 16) {
+ // No scaling in the y direction. Must always scale in the x direction.
+ scale->predict[0][0][0] = vp9_convolve8_horiz;
+ scale->predict[0][0][1] = vp9_convolve8_avg_horiz;
+ scale->predict[0][1][0] = vp9_convolve8;
+ scale->predict[0][1][1] = vp9_convolve8_avg;
+ scale->predict[1][0][0] = vp9_convolve8_horiz;
+ scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
+ } else {
+ // Must always scale in both directions.
+ scale->predict[0][0][0] = vp9_convolve8;
+ scale->predict[0][0][1] = vp9_convolve8_avg;
+ scale->predict[0][1][0] = vp9_convolve8;
+ scale->predict[0][1][1] = vp9_convolve8_avg;
+ scale->predict[1][0][0] = vp9_convolve8;
+ scale->predict[1][0][1] = vp9_convolve8_avg;
+ }
+ }
+ // 2D subpel motion always gets filtered in both directions
+ scale->predict[1][1][0] = vp9_convolve8;
+ scale->predict[1][1][1] = vp9_convolve8_avg;
+}
+
+void vp9_setup_interp_filters(MACROBLOCKD *xd,
+ INTERPOLATIONFILTERTYPE mcomp_filter_type,
+ VP9_COMMON *cm) {
+ int i;
+
+ /* Calculate scaling factors for each of the 3 available references */
+ for (i = 0; i < 3; ++i) {
+ if (cm->active_ref_idx[i] >= NUM_YV12_BUFFERS) {
+ memset(&cm->active_ref_scale[i], 0, sizeof(cm->active_ref_scale[i]));
+ continue;
+ }
+
+ vp9_setup_scale_factors_for_frame(&cm->active_ref_scale[i],
+ &cm->yv12_fb[cm->active_ref_idx[i]],
+ cm->mb_cols * 16, cm->mb_rows * 16);
+ }
+
+ if (xd->mode_info_context) {
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+
+ set_scale_factors(xd,
+ mbmi->ref_frame - 1,
+ mbmi->second_ref_frame - 1,
+ cm->active_ref_scale);
+ }
+
+
switch (mcomp_filter_type) {
case EIGHTTAP:
case SWITCHABLE:
@@ -57,6 +128,7 @@ void vp9_setup_interp_filters(MACROBLOCKD *xd,
break;
#endif
}
+ assert(((intptr_t)xd->subpix.filter_x & 0xff) == 0);
}
void vp9_copy_mem16x16_c(const uint8_t *src,
@@ -146,30 +218,50 @@ void vp9_copy_mem8x4_c(const uint8_t *src,
}
}
-static int32_t scale_motion_vector_component(int mv,
- int num,
- int den,
- int offset_q4) {
+static void set_scaled_offsets(struct scale_factors *scale,
+ int row, int col) {
+ const int x_q4 = 16 * col;
+ const int y_q4 = 16 * row;
+
+ scale->x_offset_q4 = (x_q4 * scale->x_num / scale->x_den) & 0xf;
+ scale->y_offset_q4 = (y_q4 * scale->y_num / scale->y_den) & 0xf;
+}
+
+static int32_t scale_motion_vector_component_q3(int mv_q3,
+ int num,
+ int den,
+ int offset_q4) {
+ // returns the scaled and offset value of the mv component.
+ const int32_t mv_q4 = mv_q3 << 1;
+
+ /* TODO(jkoleszar): make fixed point, or as a second multiply? */
+ return mv_q4 * num / den + offset_q4;
+}
+
+static int32_t scale_motion_vector_component_q4(int mv_q4,
+ int num,
+ int den,
+ int offset_q4) {
// returns the scaled and offset value of the mv component.
- // input and output mv have the same units -- this would work with either q3
- // or q4 motion vectors. Offset is given as a q4 fractional number.
- const int32_t mv_q4 = mv * 16;
/* TODO(jkoleszar): make fixed point, or as a second multiply? */
- return (mv_q4 * num / den + offset_q4 + 8) >> 4;
+ return mv_q4 * num / den + offset_q4;
}
-static int_mv32 scale_motion_vector(const int_mv *src_mv,
- const struct scale_factors *scale) {
+static int_mv32 scale_motion_vector_q3_to_q4(
+ const int_mv *src_mv,
+ const struct scale_factors *scale) {
// returns mv * scale + offset
int_mv32 result;
- result.as_mv.row = scale_motion_vector_component(src_mv->as_mv.row,
- scale->y_num, scale->y_den,
- scale->y_offset_q4);
- result.as_mv.col = scale_motion_vector_component(src_mv->as_mv.col,
- scale->x_num, scale->x_den,
- scale->x_offset_q4);
+ result.as_mv.row = scale_motion_vector_component_q3(src_mv->as_mv.row,
+ scale->y_num,
+ scale->y_den,
+ scale->y_offset_q4);
+ result.as_mv.col = scale_motion_vector_component_q3(src_mv->as_mv.col,
+ scale->x_num,
+ scale->x_den,
+ scale->x_offset_q4);
return result;
}
@@ -181,12 +273,13 @@ void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
const struct subpix_fn_table *subpix) {
int_mv32 mv;
- mv = scale_motion_vector(mv_q3, scale);
- src = src + (mv.as_mv.row >> 3) * src_stride + (mv.as_mv.col >> 3);
- subpix->predict[!!(mv.as_mv.col & 7)][!!(mv.as_mv.row & 7)][do_avg](
+ mv = scale_motion_vector_q3_to_q4(mv_q3, scale);
+ src = src + (mv.as_mv.row >> 4) * src_stride + (mv.as_mv.col >> 4);
+
+ scale->predict[!!(mv.as_mv.col & 15)][!!(mv.as_mv.row & 15)][do_avg](
src, src_stride, dst, dst_stride,
- subpix->filter_x[(mv.as_mv.col & 7) << 1], subpix->x_step_q4,
- subpix->filter_y[(mv.as_mv.row & 7) << 1], subpix->y_step_q4,
+ subpix->filter_x[mv.as_mv.col & 15], scale->x_step_q4,
+ subpix->filter_y[mv.as_mv.row & 15], scale->y_step_q4,
w, h);
}
@@ -205,29 +298,32 @@ void vp9_build_inter_predictor_q4(const uint8_t *src, int src_stride,
const int mv_col_q4 = ((fullpel_mv_q3->as_mv.col >> 3) << 4)
+ (frac_mv_q4->as_mv.col & 0xf);
const int scaled_mv_row_q4 =
- scale_motion_vector_component(mv_row_q4, scale->y_num, scale->y_den,
- scale->y_offset_q4);
+ scale_motion_vector_component_q4(mv_row_q4, scale->y_num, scale->y_den,
+ scale->y_offset_q4);
const int scaled_mv_col_q4 =
- scale_motion_vector_component(mv_col_q4, scale->x_num, scale->x_den,
- scale->x_offset_q4);
+ scale_motion_vector_component_q4(mv_col_q4, scale->x_num, scale->x_den,
+ scale->x_offset_q4);
const int subpel_x = scaled_mv_col_q4 & 15;
const int subpel_y = scaled_mv_row_q4 & 15;
src = src + (scaled_mv_row_q4 >> 4) * src_stride + (scaled_mv_col_q4 >> 4);
- subpix->predict[!!subpel_x][!!subpel_y][do_avg](
+ scale->predict[!!subpel_x][!!subpel_y][do_avg](
src, src_stride, dst, dst_stride,
- subpix->filter_x[subpel_x], subpix->x_step_q4,
- subpix->filter_y[subpel_y], subpix->y_step_q4,
+ subpix->filter_x[subpel_x], scale->x_step_q4,
+ subpix->filter_y[subpel_y], scale->y_step_q4,
w, h);
}
static void build_2x1_inter_predictor(const BLOCKD *d0, const BLOCKD *d1,
- const struct scale_factors *scale,
+ struct scale_factors *scale,
int block_size, int stride, int which_mv,
- const struct subpix_fn_table *subpix) {
+ const struct subpix_fn_table *subpix,
+ int row, int col) {
assert(d1->predictor - d0->predictor == block_size);
assert(d1->pre == d0->pre + block_size);
+ set_scaled_offsets(&scale[which_mv], row, col);
+
if (d0->bmi.as_mv[which_mv].as_int == d1->bmi.as_mv[which_mv].as_int) {
uint8_t **base_pre = which_mv ? d0->base_second_pre : d0->base_pre;
@@ -250,6 +346,9 @@ static void build_2x1_inter_predictor(const BLOCKD *d0, const BLOCKD *d1,
&scale[which_mv],
block_size, block_size, which_mv,
subpix);
+
+ set_scaled_offsets(&scale[which_mv], row, col + block_size);
+
vp9_build_inter_predictor(*base_pre1 + d1->pre,
d1->pre_stride,
d1->predictor, stride,
@@ -261,7 +360,9 @@ static void build_2x1_inter_predictor(const BLOCKD *d0, const BLOCKD *d1,
}
/*encoder only*/
-void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd) {
+void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd,
+ int mb_row,
+ int mb_col) {
int i, j;
BLOCKD *blockd = xd->block;
@@ -339,13 +440,16 @@ void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd) {
for (i = 16; i < 24; i += 2) {
const int use_second_ref = xd->mode_info_context->mbmi.second_ref_frame > 0;
+ const int x = 4 * (i & 1);
+ const int y = ((i - 16) >> 1) * 4;
+
int which_mv;
BLOCKD *d0 = &blockd[i];
BLOCKD *d1 = &blockd[i + 1];
for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
build_2x1_inter_predictor(d0, d1, xd->scale_factor_uv, 4, 8, which_mv,
- &xd->subpix);
+ &xd->subpix, mb_row * 8 + y, mb_col * 8 + x);
}
}
}
@@ -389,7 +493,9 @@ static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd) {
/*encoder only*/
void vp9_build_inter16x16_predictors_mby(MACROBLOCKD *xd,
uint8_t *dst_y,
- int dst_ystride) {
+ int dst_ystride,
+ int mb_row,
+ int mb_col) {
const int use_second_ref = xd->mode_info_context->mbmi.second_ref_frame > 0;
int which_mv;
@@ -399,14 +505,19 @@ void vp9_build_inter16x16_predictors_mby(MACROBLOCKD *xd,
: xd->mode_info_context->mbmi.need_to_clamp_mvs;
uint8_t *base_pre;
int_mv ymv;
+ int pre_stride;
ymv.as_int = xd->mode_info_context->mbmi.mv[which_mv].as_int;
base_pre = which_mv ? xd->second_pre.y_buffer
: xd->pre.y_buffer;
+ pre_stride = which_mv ? xd->second_pre.y_stride
+ : xd->pre.y_stride;
if (clamp_mvs)
clamp_mv_to_umv_border(&ymv.as_mv, xd);
- vp9_build_inter_predictor(base_pre, xd->block[0].pre_stride,
+ set_scaled_offsets(&xd->scale_factor[which_mv], mb_row * 16, mb_col * 16);
+
+ vp9_build_inter_predictor(base_pre, pre_stride,
dst_y, dst_ystride,
&ymv, &xd->scale_factor[which_mv],
16, 16, which_mv, &xd->subpix);
@@ -416,7 +527,9 @@ void vp9_build_inter16x16_predictors_mby(MACROBLOCKD *xd,
void vp9_build_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
uint8_t *dst_u,
uint8_t *dst_v,
- int dst_uvstride) {
+ int dst_uvstride,
+ int mb_row,
+ int mb_col) {
const int use_second_ref = xd->mode_info_context->mbmi.second_ref_frame > 0;
int which_mv;
@@ -425,7 +538,8 @@ void vp9_build_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
which_mv ? xd->mode_info_context->mbmi.need_to_clamp_secondmv
: xd->mode_info_context->mbmi.need_to_clamp_mvs;
uint8_t *uptr, *vptr;
- int pre_stride = xd->block[0].pre_stride;
+ int pre_stride = which_mv ? xd->second_pre.y_stride
+ : xd->pre.y_stride;
int_mv _o16x16mv;
int_mv _16x16mv;
@@ -456,6 +570,9 @@ void vp9_build_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
uptr = (which_mv ? xd->second_pre.u_buffer : xd->pre.u_buffer);
vptr = (which_mv ? xd->second_pre.v_buffer : xd->pre.v_buffer);
+ set_scaled_offsets(&xd->scale_factor_uv[which_mv],
+ mb_row * 16, mb_col * 16);
+
vp9_build_inter_predictor_q4(uptr, pre_stride,
dst_u, dst_uvstride,
&_16x16mv, &_o16x16mv,
@@ -475,7 +592,9 @@ void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
uint8_t *dst_u,
uint8_t *dst_v,
int dst_ystride,
- int dst_uvstride) {
+ int dst_uvstride,
+ int mb_row,
+ int mb_col) {
uint8_t *y1 = x->pre.y_buffer, *u1 = x->pre.u_buffer, *v1 = x->pre.v_buffer;
uint8_t *y2 = x->second_pre.y_buffer, *u2 = x->second_pre.u_buffer,
*v2 = x->second_pre.v_buffer;
@@ -488,27 +607,43 @@ void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
for (n = 0; n < 4; n++) {
const int x_idx = n & 1, y_idx = n >> 1;
+ int scaled_uv_offset;
x->mb_to_top_edge = edge[0] - ((y_idx * 16) << 3);
x->mb_to_bottom_edge = edge[1] + (((1 - y_idx) * 16) << 3);
x->mb_to_left_edge = edge[2] - ((x_idx * 16) << 3);
x->mb_to_right_edge = edge[3] + (((1 - x_idx) * 16) << 3);
- x->pre.y_buffer = y1 + y_idx * 16 * x->pre.y_stride + x_idx * 16;
- x->pre.u_buffer = u1 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
- x->pre.v_buffer = v1 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
+ x->pre.y_buffer = y1 + scaled_buffer_offset(x_idx * 16,
+ y_idx * 16,
+ x->pre.y_stride,
+ &x->scale_factor[0]);
+ scaled_uv_offset = scaled_buffer_offset(x_idx * 8,
+ y_idx * 8,
+ x->pre.uv_stride,
+ &x->scale_factor_uv[0]);
+ x->pre.u_buffer = u1 + scaled_uv_offset;
+ x->pre.v_buffer = v1 + scaled_uv_offset;
if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- x->second_pre.y_buffer = y2 + y_idx * 16 * x->pre.y_stride + x_idx * 16;
- x->second_pre.u_buffer = u2 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
- x->second_pre.v_buffer = v2 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
+ x->second_pre.y_buffer = y2 +
+ scaled_buffer_offset(x_idx * 16,
+ y_idx * 16,
+ x->second_pre.y_stride,
+ &x->scale_factor[1]);
+ scaled_uv_offset = scaled_buffer_offset(x_idx * 8,
+ y_idx * 8,
+ x->second_pre.uv_stride,
+ &x->scale_factor_uv[1]);
+ x->second_pre.u_buffer = u2 + scaled_uv_offset;
+ x->second_pre.v_buffer = v2 + scaled_uv_offset;
}
vp9_build_inter16x16_predictors_mb(x,
dst_y + y_idx * 16 * dst_ystride + x_idx * 16,
dst_u + y_idx * 8 * dst_uvstride + x_idx * 8,
dst_v + y_idx * 8 * dst_uvstride + x_idx * 8,
- dst_ystride, dst_uvstride);
+ dst_ystride, dst_uvstride, mb_row + y_idx, mb_col + x_idx);
}
x->mb_to_top_edge = edge[0];
@@ -539,7 +674,9 @@ void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *x,
uint8_t *dst_u,
uint8_t *dst_v,
int dst_ystride,
- int dst_uvstride) {
+ int dst_uvstride,
+ int mb_row,
+ int mb_col) {
uint8_t *y1 = x->pre.y_buffer, *u1 = x->pre.u_buffer, *v1 = x->pre.v_buffer;
uint8_t *y2 = x->second_pre.y_buffer, *u2 = x->second_pre.u_buffer,
*v2 = x->second_pre.v_buffer;
@@ -552,27 +689,43 @@ void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *x,
for (n = 0; n < 4; n++) {
const int x_idx = n & 1, y_idx = n >> 1;
+ int scaled_uv_offset;
x->mb_to_top_edge = edge[0] - ((y_idx * 32) << 3);
x->mb_to_bottom_edge = edge[1] + (((1 - y_idx) * 32) << 3);
x->mb_to_left_edge = edge[2] - ((x_idx * 32) << 3);
x->mb_to_right_edge = edge[3] + (((1 - x_idx) * 32) << 3);
- x->pre.y_buffer = y1 + y_idx * 32 * x->pre.y_stride + x_idx * 32;
- x->pre.u_buffer = u1 + y_idx * 16 * x->pre.uv_stride + x_idx * 16;
- x->pre.v_buffer = v1 + y_idx * 16 * x->pre.uv_stride + x_idx * 16;
+ x->pre.y_buffer = y1 + scaled_buffer_offset(x_idx * 32,
+ y_idx * 32,
+ x->pre.y_stride,
+ &x->scale_factor[0]);
+ scaled_uv_offset = scaled_buffer_offset(x_idx * 16,
+ y_idx * 16,
+ x->pre.uv_stride,
+ &x->scale_factor_uv[0]);
+ x->pre.u_buffer = u1 + scaled_uv_offset;
+ x->pre.v_buffer = v1 + scaled_uv_offset;
if (x->mode_info_context->mbmi.second_ref_frame > 0) {
- x->second_pre.y_buffer = y2 + y_idx * 32 * x->pre.y_stride + x_idx * 32;
- x->second_pre.u_buffer = u2 + y_idx * 16 * x->pre.uv_stride + x_idx * 16;
- x->second_pre.v_buffer = v2 + y_idx * 16 * x->pre.uv_stride + x_idx * 16;
+ x->second_pre.y_buffer = y2 +
+ scaled_buffer_offset(x_idx * 32,
+ y_idx * 32,
+ x->second_pre.y_stride,
+ &x->scale_factor[1]);
+ scaled_uv_offset = scaled_buffer_offset(x_idx * 16,
+ y_idx * 16,
+ x->second_pre.uv_stride,
+ &x->scale_factor_uv[1]);
+ x->second_pre.u_buffer = u2 + scaled_uv_offset;
+ x->second_pre.v_buffer = v2 + scaled_uv_offset;
}
vp9_build_inter32x32_predictors_sb(x,
dst_y + y_idx * 32 * dst_ystride + x_idx * 32,
dst_u + y_idx * 16 * dst_uvstride + x_idx * 16,
dst_v + y_idx * 16 * dst_uvstride + x_idx * 16,
- dst_ystride, dst_uvstride);
+ dst_ystride, dst_uvstride, mb_row + y_idx * 2, mb_col + x_idx * 2);
}
x->mb_to_top_edge = edge[0];
@@ -598,7 +751,8 @@ void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *x,
#endif
}
-static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) {
+static void build_inter4x4_predictors_mb(MACROBLOCKD *xd,
+ int mb_row, int mb_col) {
int i;
MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
BLOCKD *blockd = xd->block;
@@ -609,6 +763,7 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) {
for (i = 0; i < 16; i += 8) {
BLOCKD *d0 = &blockd[i];
BLOCKD *d1 = &blockd[i + 2];
+ const int y = i & 8;
blockd[i + 0].bmi = xd->mode_info_context->bmi[i + 0];
blockd[i + 2].bmi = xd->mode_info_context->bmi[i + 2];
@@ -619,44 +774,25 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) {
clamp_mv_to_umv_border(&blockd[i + 2].bmi.as_mv[which_mv].as_mv, xd);
}
- /* TODO(jkoleszar): Enabling this for EIGHTTAP_SMOOTH changes the
- * result slightly, for reasons that are not immediately obvious to me.
- * It probably makes sense to enable this for all filter types to be
- * consistent with the way we do 8x4 below. Leaving disabled for now.
- */
- if (mbmi->interp_filter != EIGHTTAP_SMOOTH) {
- build_2x1_inter_predictor(d0, d1, xd->scale_factor, 8, 16,
- which_mv, &xd->subpix);
- } else {
- uint8_t **base_pre0 = which_mv ? d0->base_second_pre : d0->base_pre;
- uint8_t **base_pre1 = which_mv ? d1->base_second_pre : d1->base_pre;
-
- vp9_build_inter_predictor(*base_pre0 + d0->pre,
- d0->pre_stride,
- d0->predictor, 16,
- &d0->bmi.as_mv[which_mv],
- &xd->scale_factor[which_mv],
- 8, 8, which_mv, &xd->subpix);
- vp9_build_inter_predictor(*base_pre1 + d1->pre,
- d1->pre_stride,
- d1->predictor, 16,
- &d1->bmi.as_mv[which_mv],
- &xd->scale_factor[which_mv],
- 8, 8, which_mv, &xd->subpix);
- }
+ build_2x1_inter_predictor(d0, d1, xd->scale_factor, 8, 16,
+ which_mv, &xd->subpix,
+ mb_row * 16 + y, mb_col * 16);
}
}
} else {
for (i = 0; i < 16; i += 2) {
BLOCKD *d0 = &blockd[i];
BLOCKD *d1 = &blockd[i + 1];
+ const int x = (i & 3) * 4;
+ const int y = (i >> 2) * 4;
blockd[i + 0].bmi = xd->mode_info_context->bmi[i + 0];
blockd[i + 1].bmi = xd->mode_info_context->bmi[i + 1];
for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
build_2x1_inter_predictor(d0, d1, xd->scale_factor, 4, 16,
- which_mv, &xd->subpix);
+ which_mv, &xd->subpix,
+ mb_row * 16 + y, mb_col * 16 + x);
}
}
}
@@ -664,10 +800,13 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) {
for (i = 16; i < 24; i += 2) {
BLOCKD *d0 = &blockd[i];
BLOCKD *d1 = &blockd[i + 1];
+ const int x = 4 * (i & 1);
+ const int y = ((i - 16) >> 1) * 4;
for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
build_2x1_inter_predictor(d0, d1, xd->scale_factor_uv, 4, 8,
- which_mv, &xd->subpix);
+ which_mv, &xd->subpix,
+ mb_row * 8 + y, mb_col * 8 + x);
}
}
}
@@ -769,17 +908,23 @@ void vp9_build_inter16x16_predictors_mb(MACROBLOCKD *xd,
uint8_t *dst_u,
uint8_t *dst_v,
int dst_ystride,
- int dst_uvstride) {
- vp9_build_inter16x16_predictors_mby(xd, dst_y, dst_ystride);
- vp9_build_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride);
+ int dst_uvstride,
+ int mb_row,
+ int mb_col) {
+ vp9_build_inter16x16_predictors_mby(xd, dst_y, dst_ystride, mb_row, mb_col);
+ vp9_build_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride,
+ mb_row, mb_col);
}
-void vp9_build_inter_predictors_mb(MACROBLOCKD *xd) {
+void vp9_build_inter_predictors_mb(MACROBLOCKD *xd,
+ int mb_row,
+ int mb_col) {
if (xd->mode_info_context->mbmi.mode != SPLITMV) {
vp9_build_inter16x16_predictors_mb(xd, xd->predictor,
&xd->predictor[256],
- &xd->predictor[320], 16, 8);
+ &xd->predictor[320], 16, 8,
+ mb_row, mb_col);
#if CONFIG_COMP_INTERINTRA_PRED
if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
@@ -790,6 +935,6 @@ void vp9_build_inter_predictors_mb(MACROBLOCKD *xd) {
#endif
} else {
build_4x4uvmvs(xd);
- build_inter4x4_predictors_mb(xd);
+ build_inter4x4_predictors_mb(xd, mb_row, mb_col);
}
}
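
To make the new fixed-point plumbing concrete, here is a worked example under
assumed sizes (a 352-wide reference predicting a 640-wide frame); the numbers
are illustrative, but the formulas are the ones from
vp9_setup_scale_factors_for_frame(), set_scaled_offsets() and
scale_motion_vector_component_q3() above:

#include <assert.h>

int main(void) {
  /* Assumed sizes: 352-wide reference, 640-wide current frame. */
  const int x_num = 352, x_den = 640;

  /* Per-frame step: q4 source pixels advanced per destination pixel. */
  const int x_step_q4 = 16 * x_num / x_den;     /* 5632 / 640 = 8 */

  /* Per-MB offset: fractional (q4) part of the scaled MB origin (col 1). */
  const int x_q4 = 16 * (1 * 16);               /* 256 */
  const int x_offset_q4 = (x_q4 * x_num / x_den) & 0xf;

  /* MV scaling: a q3 component is widened to q4, scaled, offset added. */
  const int mv_q3 = 5;                          /* 5/8 pel */
  const int scaled_q4 = (mv_q3 << 1) * x_num / x_den + x_offset_q4;

  assert(x_step_q4 == 8);
  assert(x_offset_q4 == 12);                    /* 90112 / 640 = 140, & 0xf */
  assert(scaled_q4 == 17);                      /* 3520 / 640 = 5, + 12 */
  return 0;
}
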
diff --git a/vp9/common/vp9_reconinter.h b/vp9/common/vp9_reconinter.h
index 43f716427..831ce2a73 100644
--- a/vp9/common/vp9_reconinter.h
+++ b/vp9/common/vp9_reconinter.h
@@ -16,43 +16,61 @@
struct subpix_fn_table;
-extern void vp9_build_inter16x16_predictors_mby(MACROBLOCKD *xd,
- uint8_t *dst_y,
- int dst_ystride);
-
-extern void vp9_build_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_uvstride);
-
-extern void vp9_build_inter16x16_predictors_mb(MACROBLOCKD *xd,
- uint8_t *dst_y,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_ystride,
- int dst_uvstride);
-
-extern void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
- uint8_t *dst_y,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_ystride,
- int dst_uvstride);
-
-extern void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *x,
- uint8_t *dst_y,
- uint8_t *dst_u,
- uint8_t *dst_v,
- int dst_ystride,
- int dst_uvstride);
-
-extern void vp9_build_inter_predictors_mb(MACROBLOCKD *xd);
-
-extern void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd);
-
-extern void vp9_setup_interp_filters(MACROBLOCKD *xd,
- INTERPOLATIONFILTERTYPE filter,
- VP9_COMMON *cm);
+void vp9_build_inter16x16_predictors_mby(MACROBLOCKD *xd,
+ uint8_t *dst_y,
+ int dst_ystride,
+ int mb_row,
+ int mb_col);
+
+void vp9_build_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
+ uint8_t *dst_u,
+ uint8_t *dst_v,
+ int dst_uvstride,
+ int mb_row,
+ int mb_col);
+
+void vp9_build_inter16x16_predictors_mb(MACROBLOCKD *xd,
+ uint8_t *dst_y,
+ uint8_t *dst_u,
+ uint8_t *dst_v,
+ int dst_ystride,
+ int dst_uvstride,
+ int mb_row,
+ int mb_col);
+
+void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
+ uint8_t *dst_y,
+ uint8_t *dst_u,
+ uint8_t *dst_v,
+ int dst_ystride,
+ int dst_uvstride,
+ int mb_row,
+ int mb_col);
+
+void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *x,
+ uint8_t *dst_y,
+ uint8_t *dst_u,
+ uint8_t *dst_v,
+ int dst_ystride,
+ int dst_uvstride,
+ int mb_row,
+ int mb_col);
+
+void vp9_build_inter_predictors_mb(MACROBLOCKD *xd,
+ int mb_row,
+ int mb_col);
+
+void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd,
+ int mb_row,
+ int mb_col);
+
+void vp9_setup_interp_filters(MACROBLOCKD *xd,
+ INTERPOLATIONFILTERTYPE filter,
+ VP9_COMMON *cm);
+
+void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
+ YV12_BUFFER_CONFIG *other,
+ int this_w, int this_h);
void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
@@ -68,4 +86,56 @@ void vp9_build_inter_predictor_q4(const uint8_t *src, int src_stride,
const struct scale_factors *scale,
int w, int h, int do_avg,
const struct subpix_fn_table *subpix);
+
+static int scale_value_x(int val, const struct scale_factors *scale) {
+ return val * scale->x_num / scale->x_den;
+}
+
+static int scale_value_y(int val, const struct scale_factors *scale) {
+ return val * scale->y_num / scale->y_den;
+}
+
+static int scaled_buffer_offset(int x_offset,
+ int y_offset,
+ int stride,
+ const struct scale_factors *scale) {
+ return scale_value_y(y_offset, scale) * stride +
+ scale_value_x(x_offset, scale);
+}
+
+static void setup_pred_block(YV12_BUFFER_CONFIG *dst,
+ const YV12_BUFFER_CONFIG *src,
+ int mb_row, int mb_col,
+ const struct scale_factors *scale,
+ const struct scale_factors *scale_uv) {
+ const int recon_y_stride = src->y_stride;
+ const int recon_uv_stride = src->uv_stride;
+ int recon_yoffset;
+ int recon_uvoffset;
+
+ if (scale) {
+ recon_yoffset = scaled_buffer_offset(16 * mb_col, 16 * mb_row,
+ recon_y_stride, scale);
+ recon_uvoffset = scaled_buffer_offset(8 * mb_col, 8 * mb_row,
+ recon_uv_stride, scale_uv);
+ } else {
+ recon_yoffset = 16 * mb_row * recon_y_stride + 16 * mb_col;
+ recon_uvoffset = 8 * mb_row * recon_uv_stride + 8 * mb_col;
+ }
+ *dst = *src;
+ dst->y_buffer += recon_yoffset;
+ dst->u_buffer += recon_uvoffset;
+ dst->v_buffer += recon_uvoffset;
+}
+
+static void set_scale_factors(MACROBLOCKD *xd,
+ int ref0, int ref1,
+ struct scale_factors scale_factor[MAX_REF_FRAMES]) {
+
+ xd->scale_factor[0] = scale_factor[ref0 >= 0 ? ref0 : 0];
+ xd->scale_factor[1] = scale_factor[ref1 >= 0 ? ref1 : 0];
+ xd->scale_factor_uv[0] = xd->scale_factor[0];
+ xd->scale_factor_uv[1] = xd->scale_factor[1];
+}
+
#endif // VP9_COMMON_VP9_RECONINTER_H_
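
The offset helpers added above can be sanity-checked in isolation.
scaled_buffer_offset() maps a pixel position in the current frame to the
matching position in a differently sized reference. A small standalone
sketch, continuing the assumed 352x288-to-640x480 example (the struct here is
a stand-in for the real struct scale_factors):

#include <assert.h>

struct scale { int x_num, x_den, y_num, y_den; };

/* Same arithmetic as scale_value_x/scale_value_y + scaled_buffer_offset. */
static int scaled_offset(int x, int y, int stride, const struct scale *s) {
  return (y * s->y_num / s->y_den) * stride + (x * s->x_num / s->x_den);
}

int main(void) {
  const struct scale s = { 352, 640, 288, 480 };
  const int stride = 352;                    /* reference y_stride, assumed */
  /* MB (row 2, col 3) => pixel (48, 32) in the 640x480 frame. */
  assert(scaled_offset(16 * 3, 16 * 2, stride, &s) == 19 * stride + 26);
  return 0;
}
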
diff --git a/vp9/common/vp9_reconintra.h b/vp9/common/vp9_reconintra.h
index 88584ad3b..3031fb699 100644
--- a/vp9/common/vp9_reconintra.h
+++ b/vp9/common/vp9_reconintra.h
@@ -14,37 +14,43 @@
#include "vpx/vpx_integer.h"
#include "vp9/common/vp9_blockd.h"
-extern void vp9_recon_intra_mbuv(MACROBLOCKD *xd);
-extern B_PREDICTION_MODE vp9_find_dominant_direction(uint8_t *ptr,
- int stride, int n);
-extern B_PREDICTION_MODE vp9_find_bpred_context(BLOCKD *x);
+void vp9_recon_intra_mbuv(MACROBLOCKD *xd);
+
+B_PREDICTION_MODE vp9_find_dominant_direction(uint8_t *ptr,
+ int stride, int n);
+
+B_PREDICTION_MODE vp9_find_bpred_context(BLOCKD *x);
+
#if CONFIG_COMP_INTERINTRA_PRED
-extern void vp9_build_interintra_16x16_predictors_mb(MACROBLOCKD *xd,
- uint8_t *ypred,
- uint8_t *upred,
- uint8_t *vpred,
- int ystride,
- int uvstride);
-extern void vp9_build_interintra_16x16_predictors_mby(MACROBLOCKD *xd,
- uint8_t *ypred,
- int ystride);
-extern void vp9_build_interintra_16x16_predictors_mbuv(MACROBLOCKD *xd,
- uint8_t *upred,
- uint8_t *vpred,
- int uvstride);
+void vp9_build_interintra_16x16_predictors_mb(MACROBLOCKD *xd,
+ uint8_t *ypred,
+ uint8_t *upred,
+ uint8_t *vpred,
+ int ystride,
+ int uvstride);
+
+void vp9_build_interintra_16x16_predictors_mby(MACROBLOCKD *xd,
+ uint8_t *ypred,
+ int ystride);
+
+void vp9_build_interintra_16x16_predictors_mbuv(MACROBLOCKD *xd,
+ uint8_t *upred,
+ uint8_t *vpred,
+ int uvstride);
#endif // CONFIG_COMP_INTERINTRA_PRED
-extern void vp9_build_interintra_32x32_predictors_sb(MACROBLOCKD *xd,
- uint8_t *ypred,
- uint8_t *upred,
- uint8_t *vpred,
- int ystride,
- int uvstride);
-extern void vp9_build_interintra_64x64_predictors_sb(MACROBLOCKD *xd,
- uint8_t *ypred,
- uint8_t *upred,
- uint8_t *vpred,
- int ystride,
- int uvstride);
+void vp9_build_interintra_32x32_predictors_sb(MACROBLOCKD *xd,
+ uint8_t *ypred,
+ uint8_t *upred,
+ uint8_t *vpred,
+ int ystride,
+ int uvstride);
+
+void vp9_build_interintra_64x64_predictors_sb(MACROBLOCKD *xd,
+ uint8_t *ypred,
+ uint8_t *upred,
+ uint8_t *vpred,
+ int ystride,
+ int uvstride);
#endif // VP9_COMMON_VP9_RECONINTRA_H_
diff --git a/vp9/common/vp9_rtcd.c b/vp9/common/vp9_rtcd.c
index 277d5b217..72613ae07 100644
--- a/vp9/common/vp9_rtcd.c
+++ b/vp9/common/vp9_rtcd.c
@@ -12,10 +12,9 @@
#include "vp9_rtcd.h"
#include "vpx_ports/vpx_once.h"
-extern void vpx_scale_rtcd(void);
+void vpx_scale_rtcd(void);
-void vp9_rtcd()
-{
+void vp9_rtcd() {
vpx_scale_rtcd();
once(setup_rtcd_internal);
}
diff --git a/vp9/common/vp9_rtcd_defs.sh b/vp9/common/vp9_rtcd_defs.sh
index 700af7fa7..02a6711e5 100644
--- a/vp9/common/vp9_rtcd_defs.sh
+++ b/vp9/common/vp9_rtcd_defs.sh
@@ -296,7 +296,7 @@ specialize vp9_short_iht16x16
# dct and add
prototype void vp9_dc_only_idct_add "int input_dc, uint8_t *pred_ptr, uint8_t *dst_ptr, int pitch, int stride"
-specialize vp9_dc_only_idct_add
+specialize vp9_dc_only_idct_add sse2
prototype void vp9_short_inv_walsh4x4_1_x8 "int16_t *input, int16_t *output, int pitch"
specialize vp9_short_inv_walsh4x4_1_x8
diff --git a/vp9/common/vp9_setupintrarecon.h b/vp9/common/vp9_setupintrarecon.h
index 457265528..e389f3c91 100644
--- a/vp9/common/vp9_setupintrarecon.h
+++ b/vp9/common/vp9_setupintrarecon.h
@@ -13,6 +13,6 @@
#include "vpx_scale/yv12config.h"
-extern void vp9_setup_intra_recon(YV12_BUFFER_CONFIG *ybf);
+void vp9_setup_intra_recon(YV12_BUFFER_CONFIG *ybf);
#endif // VP9_COMMON_VP9_SETUPINTRARECON_H_
diff --git a/vp9/common/x86/vp9_idctllm_x86.c b/vp9/common/x86/vp9_idctllm_x86.c
new file mode 100644
index 000000000..667f5c1d3
--- /dev/null
+++ b/vp9/common/x86/vp9_idctllm_x86.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <emmintrin.h> // SSE2
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_idct.h"
+
+#if HAVE_SSE2
+// To improve performance, clip absolute diff values to [0, 255], which
+// allows the additions/subtractions to stay in 8 bits.
+void vp9_dc_only_idct_add_sse2(int input_dc, uint8_t *pred_ptr,
+ uint8_t *dst_ptr, int pitch, int stride) {
+ int a1;
+ int16_t out;
+ uint8_t abs_diff;
+ __m128i p0, p1, p2, p3;
+ unsigned int extended_diff;
+ __m128i diff;
+
+ out = dct_const_round_shift(input_dc * cospi_16_64);
+ out = dct_const_round_shift(out * cospi_16_64);
+ a1 = ROUND_POWER_OF_TWO(out, 4);
+
+ // Read prediction data.
+ p0 = _mm_cvtsi32_si128 (*(const int *)(pred_ptr + 0 * pitch));
+ p1 = _mm_cvtsi32_si128 (*(const int *)(pred_ptr + 1 * pitch));
+ p2 = _mm_cvtsi32_si128 (*(const int *)(pred_ptr + 2 * pitch));
+ p3 = _mm_cvtsi32_si128 (*(const int *)(pred_ptr + 3 * pitch));
+
+ // Unpack prediction data, and store 4x4 array in 1 XMM register.
+ p0 = _mm_unpacklo_epi32(p0, p1);
+ p2 = _mm_unpacklo_epi32(p2, p3);
+ p0 = _mm_unpacklo_epi64(p0, p2);
+
+ // Clip dc value to [0, 255] range. Then, do addition or subtraction
+ // according to its sign.
+ if (a1 >= 0) {
+ abs_diff = (a1 > 255) ? 255 : a1;
+ extended_diff = abs_diff * 0x01010101u;
+ diff = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_diff), 0);
+
+ p1 = _mm_adds_epu8(p0, diff);
+ } else {
+ abs_diff = (a1 < -255) ? 255 : -a1;
+ extended_diff = abs_diff * 0x01010101u;
+ diff = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_diff), 0);
+
+ p1 = _mm_subs_epu8(p0, diff);
+ }
+
+ // Store results to dst.
+ *(int *)dst_ptr = _mm_cvtsi128_si32(p1);
+ dst_ptr += stride;
+
+ p1 = _mm_srli_si128(p1, 4);
+ *(int *)dst_ptr = _mm_cvtsi128_si32(p1);
+ dst_ptr += stride;
+
+ p1 = _mm_srli_si128(p1, 4);
+ *(int *)dst_ptr = _mm_cvtsi128_si32(p1);
+ dst_ptr += stride;
+
+ p1 = _mm_srli_si128(p1, 4);
+ *(int *)dst_ptr = _mm_cvtsi128_si32(p1);
+}
+#endif
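
For reference, a scalar sketch of what the SSE2 routine computes; the
project's actual C reference is vp9_dc_only_idct_add_c() in vp9_idctllm.c.
Because prediction values are already in [0, 255], clamping |a1| to 255 and
using saturating byte adds/subtracts is equivalent to clipping each output
sum, which is what this sketch does:

#include "vpx/vpx_integer.h"
#include "vp9/common/vp9_idct.h"  /* dct_const_round_shift, cospi_16_64,
                                     ROUND_POWER_OF_TWO */

static void dc_only_idct_add_scalar(int input_dc, const uint8_t *pred_ptr,
                                    uint8_t *dst_ptr, int pitch, int stride) {
  int r, c;
  int16_t out = dct_const_round_shift(input_dc * cospi_16_64);
  int a1;
  out = dct_const_round_shift(out * cospi_16_64);
  a1 = ROUND_POWER_OF_TWO(out, 4);

  for (r = 0; r < 4; ++r)
    for (c = 0; c < 4; ++c) {
      const int v = pred_ptr[r * pitch + c] + a1;
      dst_ptr[r * stride + c] = v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }
}
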