diff options
author | James Zern <jzern@google.com> | 2015-07-30 19:46:55 -0700 |
---|---|---|
committer | James Zern <jzern@google.com> | 2015-07-31 17:31:58 -0700 |
commit | 7dc5a689b4bef3071ac032f3cb53917cf440d3f0 (patch) | |
tree | c7e841da0ce80813185077c5fa776e6569bb3411 /vp9/encoder/arm | |
parent | eb6b443bd266d3c1f7931830de92fd11b86e27cd (diff) | |
download | libvpx-7dc5a689b4bef3071ac032f3cb53917cf440d3f0.tar libvpx-7dc5a689b4bef3071ac032f3cb53917cf440d3f0.tar.gz libvpx-7dc5a689b4bef3071ac032f3cb53917cf440d3f0.tar.bz2 libvpx-7dc5a689b4bef3071ac032f3cb53917cf440d3f0.zip |
add vp9_vector_var_neon
~50-60% faster depending on the width
Change-Id: I9d007cfa10b9aaa2169c8c009d95522df6123a92
Diffstat (limited to 'vp9/encoder/arm')
-rw-r--r-- | vp9/encoder/arm/neon/vp9_avg_neon.c | 44 |
1 file changed, 44 insertions, 0 deletions
// Sum of squared differences minus the scaled squared sum of differences.
// ref, src = [0, 510] -- so each per-lane difference fits easily in 16 bits.
// bwl = {2, 3, 4}, giving width = {16, 32, 64} elements.
int vp9_vector_var_neon(int16_t const *ref, int16_t const *src, const int bwl) {
  const int width = 4 << bwl;
  int32x4_t sum_sq = vdupq_n_s32(0);   // per-lane sum of diff^2, 26-bit range.
  int16x8_t sum_diff = vdupq_n_s16(0); // per-lane sum of diff, 16-bit range.
  int i;

  assert(width >= 8);
  assert((width % 8) == 0);

  // Process 8 elements per iteration; width is a multiple of 8.
  for (i = 0; i < width; i += 8) {
    const int16x8_t r_vec = vld1q_s16(ref + i);
    const int16x8_t s_vec = vld1q_s16(src + i);
    const int16x8_t d = vsubq_s16(r_vec, s_vec);  // [-510, 510], 10 bits.
    const int16x4_t d_lo = vget_low_s16(d);
    const int16x4_t d_hi = vget_high_s16(d);

    sum_diff = vaddq_s16(sum_diff, d);
    sum_sq = vmlal_s16(sum_sq, d_lo, d_lo);
    sum_sq = vmlal_s16(sum_sq, d_hi, d_hi);
  }

  {
    // Note: 'sum_diff''s pairwise addition could be implemented similarly to
    // horizontal_add_u16x8(), but one less vpaddl with 'sum_diff' when paired
    // with the summation of 'sum_sq' performed better on a Cortex-A15.
    const int32x4_t widened = vpaddlq_s16(sum_diff);
    const int32x2_t folded =
        vadd_s32(vget_low_s32(widened), vget_high_s32(widened));
    const int mean_sum = vget_lane_s32(vpadd_s32(folded, folded), 0);

    // Horizontal reduction of 'sum_sq' via a 64-bit pairwise widen, then a
    // 32-bit add of the two halves (the true sum fits in 32 bits).
    const int64x2_t sq_wide = vpaddlq_s32(sum_sq);
    const int32x2_t sq_folded =
        vadd_s32(vreinterpret_s32_s64(vget_low_s64(sq_wide)),
                 vreinterpret_s32_s64(vget_high_s64(sq_wide)));
    const int sq_sum = vget_lane_s32(sq_folded, 0);

    // variance = sum(d^2) - (sum(d))^2 / n, with n = width = 4 << bwl,
    // i.e. a right shift by (bwl + 2).
    return sq_sum - ((mean_sum * mean_sum) >> (bwl + 2));
  }
}