author | Johann Koenig <johannkoenig@google.com> | 2017-05-22 17:48:32 +0000 |
---|---|---|
committer | Gerrit Code Review <noreply-gerritcodereview@google.com> | 2017-05-22 17:48:33 +0000 |
commit | e7cac130167c1da6d17caa33e216250d989d0fe8 (patch) | |
tree | e7c4629cc64616fbf0630477d81de50a644d199d /vpx_dsp/arm | |
parent | b3bf91bdc60220c004a22d21c867cc392e684b81 (diff) | |
parent | 7b742da63e4b829ba013670dd838d263f5df8956 (diff) | |
Merge changes Ib8dd96f7,Ie9854b77
* changes:
neon variance: process 4x blocks
use memcpy for unaligned neon stores
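
The memcpy change addresses a subtle portability issue: reading a `uint8_t` buffer through a `uint32_t *` cast both violates strict aliasing and tells the compiler the address is 4-byte aligned, so it may emit aligned-access instructions or alignment hints that misbehave when pixel rows start at arbitrary offsets. A minimal standalone sketch of the two approaches (illustrative only, not code from this commit; the function names are made up):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Risky: the cast promises 4-byte alignment and violates strict aliasing,
   so the compiler may emit an aligned load or add alignment hints. */
static uint32_t load_u32_cast(const uint8_t *p) {
  return *(const uint32_t *)p;
}

/* Well-defined for any address: a fixed-size memcpy carries no alignment
   assumption, and compilers lower it to a single unaligned 32-bit load. */
static uint32_t load_u32_memcpy(const uint8_t *p) {
  uint32_t v;
  memcpy(&v, p, 4);
  return v;
}

int main(void) {
  uint8_t buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
  /* buf + 1 is misaligned for uint32_t; only the memcpy path is safe. */
  printf("%08x\n", (unsigned)load_u32_memcpy(buf + 1));
  (void)load_u32_cast; /* shown for contrast only; do not call on buf + 1 */
  return 0;
}
```

This is the same pattern `uint32_to_mem` and `load_unaligned_u8q` in the diff below rely on: the memcpy communicates "any alignment" to the compiler while still compiling down to one 32-bit access.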
Diffstat (limited to 'vpx_dsp/arm')
-rw-r--r-- | vpx_dsp/arm/mem_neon.h | 42 |
-rw-r--r-- | vpx_dsp/arm/variance_neon.c | 48 |
2 files changed, 89 insertions, 1 deletion
diff --git a/vpx_dsp/arm/mem_neon.h b/vpx_dsp/arm/mem_neon.h
index ba5c3d513..23d2b4e08 100644
--- a/vpx_dsp/arm/mem_neon.h
+++ b/vpx_dsp/arm/mem_neon.h
@@ -69,6 +69,48 @@ static INLINE void store_s16q_to_tran_low(tran_low_t *buf, const int16x8_t a) {
 #endif
 }
 
+// Propagate type information to the compiler. Without this the compiler may
+// assume the required alignment of uint32_t (4 bytes) and add alignment hints
+// to the memory access.
+//
+// This is used for functions operating on uint8_t which wish to load or store 4
+// values at a time but which may not be on 4 byte boundaries.
+static INLINE void uint32_to_mem(uint8_t *buf, uint32_t a) {
+  memcpy(buf, &a, 4);
+}
+
+// Load 4 sets of 4 bytes when alignment is not guaranteed.
+static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf, int stride) {
+  uint32_t a;
+  uint32x4_t a_u32 = vdupq_n_u32(0);
+  memcpy(&a, buf, 4);
+  buf += stride;
+  a_u32 = vld1q_lane_u32(&a, a_u32, 0);
+  memcpy(&a, buf, 4);
+  buf += stride;
+  a_u32 = vld1q_lane_u32(&a, a_u32, 1);
+  memcpy(&a, buf, 4);
+  buf += stride;
+  a_u32 = vld1q_lane_u32(&a, a_u32, 2);
+  memcpy(&a, buf, 4);
+  buf += stride;
+  a_u32 = vld1q_lane_u32(&a, a_u32, 3);
+  return vreinterpretq_u8_u32(a_u32);
+}
+
+// Store 4 sets of 4 bytes when alignment is not guaranteed.
+static INLINE void store_unaligned_u8q(uint8_t *buf, int stride,
+                                       const uint8x16_t a) {
+  const uint32x4_t a_u32 = vreinterpretq_u32_u8(a);
+  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 0));
+  buf += stride;
+  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 1));
+  buf += stride;
+  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 2));
+  buf += stride;
+  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 3));
+}
+
 // Load 2 sets of 4 bytes when alignment is guaranteed.
 static INLINE uint8x8_t load_u8(const uint8_t *buf, int stride) {
   uint32x2_t a = vdup_n_u32(0);
diff --git a/vpx_dsp/arm/variance_neon.c b/vpx_dsp/arm/variance_neon.c
index c0828e8f6..aac960b3a 100644
--- a/vpx_dsp/arm/variance_neon.c
+++ b/vpx_dsp/arm/variance_neon.c
@@ -14,6 +14,7 @@
 
 #include "./vpx_config.h"
 #include "vpx/vpx_integer.h"
+#include "vpx_dsp/arm/mem_neon.h"
 #include "vpx_ports/mem.h"
 
 static INLINE int horizontal_add_s16x8(const int16x8_t v_16x8) {
@@ -32,6 +33,47 @@ static INLINE int horizontal_add_s32x4(const int32x4_t v_32x4) {
 }
 
 // w * h must be less than 2048 or sum_s16 may overflow.
+// Process a block of width 4 four rows at a time.
+static void variance_neon_w4x4(const uint8_t *a, int a_stride, const uint8_t *b,
+                               int b_stride, int h, uint32_t *sse, int *sum) {
+  int i;
+  int16x8_t sum_s16 = vdupq_n_s16(0);
+  int32x4_t sse_lo_s32 = vdupq_n_s32(0);
+  int32x4_t sse_hi_s32 = vdupq_n_s32(0);
+
+  for (i = 0; i < h; i += 4) {
+    const uint8x16_t a_u8 = load_unaligned_u8q(a, a_stride);
+    const uint8x16_t b_u8 = load_unaligned_u8q(b, b_stride);
+    const uint16x8_t diff_lo_u16 =
+        vsubl_u8(vget_low_u8(a_u8), vget_low_u8(b_u8));
+    const uint16x8_t diff_hi_u16 =
+        vsubl_u8(vget_high_u8(a_u8), vget_high_u8(b_u8));
+
+    const int16x8_t diff_lo_s16 = vreinterpretq_s16_u16(diff_lo_u16);
+    const int16x8_t diff_hi_s16 = vreinterpretq_s16_u16(diff_hi_u16);
+
+    sum_s16 = vaddq_s16(sum_s16, diff_lo_s16);
+    sum_s16 = vaddq_s16(sum_s16, diff_hi_s16);
+
+    sse_lo_s32 = vmlal_s16(sse_lo_s32, vget_low_s16(diff_lo_s16),
+                           vget_low_s16(diff_lo_s16));
+    sse_lo_s32 = vmlal_s16(sse_lo_s32, vget_high_s16(diff_lo_s16),
+                           vget_high_s16(diff_lo_s16));
+
+    sse_hi_s32 = vmlal_s16(sse_hi_s32, vget_low_s16(diff_hi_s16),
+                           vget_low_s16(diff_hi_s16));
+    sse_hi_s32 = vmlal_s16(sse_hi_s32, vget_high_s16(diff_hi_s16),
+                           vget_high_s16(diff_hi_s16));
+
+    a += 4 * a_stride;
+    b += 4 * b_stride;
+  }
+
+  *sum = horizontal_add_s16x8(sum_s16);
+  *sse = (uint32_t)horizontal_add_s32x4(vaddq_s32(sse_lo_s32, sse_hi_s32));
+}
+
+// w * h must be less than 2048 or sum_s16 may overflow.
 // Process a block of any size where the width is divisible by 16.
 static void variance_neon_w16(const uint8_t *a, int a_stride, const uint8_t *b,
                               int b_stride, int w, int h, uint32_t *sse,
@@ -127,7 +169,9 @@ void vpx_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
                           const uint8_t *b, int b_stride, \
                           unsigned int *sse) { \
     int sum; \
-    if (n == 8) \
+    if (n == 4) \
+      variance_neon_w4x4(a, a_stride, b, b_stride, m, sse, &sum); \
+    else if (n == 8) \
       variance_neon_w8x2(a, a_stride, b, b_stride, m, sse, &sum); \
     else \
       variance_neon_w16(a, a_stride, b, b_stride, n, m, sse, &sum); \
@@ -137,6 +181,8 @@ void vpx_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
   return *sse - (uint32_t)(((int64_t)sum * sum) >> shift); \
 }
 
+varianceNxM(4, 4, 4);
+varianceNxM(4, 8, 5);
 varianceNxM(8, 4, 5);
 varianceNxM(8, 8, 6);
 varianceNxM(8, 16, 7);
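
For context on what the NEON routines accumulate: `variance_neon_w4x4` produces the block's sum of differences and sum of squared differences, and the `varianceNxM` macro combines them as `sse - sum^2 / (w * h)`, with `shift = log2(w * h)` (so `varianceNxM(4, 4, 4)` passes shift 4 for the 16-pixel 4x4 block). A scalar reference sketch of the same computation (illustrative only; `variance_ref` is not a libvpx function):

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t variance_ref(const uint8_t *a, int a_stride,
                             const uint8_t *b, int b_stride,
                             int w, int h, int shift) {
  int64_t sum = 0;   /* sum of signed differences */
  uint32_t sse = 0;  /* sum of squared differences */
  int i, j;
  for (i = 0; i < h; ++i) {
    for (j = 0; j < w; ++j) {
      const int diff = a[j] - b[j];
      sum += diff;
      sse += (uint32_t)(diff * diff);
    }
    a += a_stride;
    b += b_stride;
  }
  /* Matches the macro's return: sse - sum^2 >> log2(w * h). */
  return sse - (uint32_t)((sum * sum) >> shift);
}

int main(void) {
  uint8_t a[4 * 4], b[4 * 4];
  int k;
  for (k = 0; k < 16; ++k) {
    a[k] = (uint8_t)(k * 3);
    b[k] = (uint8_t)(k * 3 + (k & 1));
  }
  /* 4x4 block: w * h = 16, so shift = 4, as in varianceNxM(4, 4, 4). */
  printf("%u\n", (unsigned)variance_ref(a, 4, b, 4, 4, 4, 4));
  return 0;
}
```

The NEON version reaches the same `sum` and `sse` by widening `uint8x16_t` differences to 16-bit lanes and accumulating squares into 32-bit lanes, which is why the "w * h must be less than 2048" bound on the 16-bit `sum_s16` accumulator applies.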