diff options
author | Johann <johannkoenig@google.com> | 2017-05-12 18:14:27 -0700 |
---|---|---|
committer | Johann <johannkoenig@google.com> | 2017-05-17 12:11:31 -0700 |
commit | 2057d3ef757a18e6bb005812a9912748ae4c7610 (patch) | |
tree | bfa82225864dee01e743ead5b37f19f2e35b11f9 /vp8/common/arm/neon | |
parent | 7498fe2e542183ff6e8091608ae57fade2bde4ee (diff) | |
download | libvpx-2057d3ef757a18e6bb005812a9912748ae4c7610.tar libvpx-2057d3ef757a18e6bb005812a9912748ae4c7610.tar.gz libvpx-2057d3ef757a18e6bb005812a9912748ae4c7610.tar.bz2 libvpx-2057d3ef757a18e6bb005812a9912748ae4c7610.zip |
use memcpy for unaligned neon stores
Advise the compiler that the store is eventually going to a uint8_t
buffer. This helps avoid alignment hints that would cause the unaligned
memory access to fault.
Originally added as a workaround for clang:
https://bugs.llvm.org/show_bug.cgi?id=24421
Change-Id: Ie9854b777cfb2f4baaee66764f0e51dcb094d51e
Diffstat (limited to 'vp8/common/arm/neon')
-rw-r--r-- | vp8/common/arm/neon/bilinearpredict_neon.c | 34 | ||||
-rw-r--r-- | vp8/common/arm/neon/sixtappredict_neon.c | 36 |
2 files changed, 7 insertions(+), 63 deletions(-)
diff --git a/vp8/common/arm/neon/bilinearpredict_neon.c b/vp8/common/arm/neon/bilinearpredict_neon.c index af566c2c4..8520ab5ca 100644 --- a/vp8/common/arm/neon/bilinearpredict_neon.c +++ b/vp8/common/arm/neon/bilinearpredict_neon.c @@ -11,6 +11,7 @@ #include <arm_neon.h> #include <string.h> #include "./vpx_config.h" +#include "vpx_dsp/arm/mem_neon.h" static const uint8_t bifilter4_coeff[8][2] = { { 128, 0 }, { 112, 16 }, { 96, 32 }, { 80, 48 }, @@ -21,35 +22,6 @@ static INLINE uint8x8_t load_and_shift(const unsigned char *a) { return vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(vld1_u8(a)), 32)); } -static INLINE void store4x4(unsigned char *dst, int dst_stride, - const uint8x8_t a0, const uint8x8_t a1) { - if (!((uintptr_t)dst & 0x3) && !(dst_stride & 0x3)) { - vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 0); - dst += dst_stride; - vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 1); - dst += dst_stride; - vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 0); - dst += dst_stride; - vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 1); - } else { - // Store to the aligned local buffer and memcpy instead of vget_lane_u8 - // which is really really slow. 
- uint32_t output_buffer[4]; - vst1_lane_u32(output_buffer, vreinterpret_u32_u8(a0), 0); - vst1_lane_u32(output_buffer + 1, vreinterpret_u32_u8(a0), 1); - vst1_lane_u32(output_buffer + 2, vreinterpret_u32_u8(a1), 0); - vst1_lane_u32(output_buffer + 3, vreinterpret_u32_u8(a1), 1); - - memcpy(dst, output_buffer, 4); - dst += dst_stride; - memcpy(dst, output_buffer + 1, 4); - dst += dst_stride; - memcpy(dst, output_buffer + 2, 4); - dst += dst_stride; - memcpy(dst, output_buffer + 3, 4); - } -} - void vp8_bilinear_predict4x4_neon(unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, @@ -122,7 +94,7 @@ void vp8_bilinear_predict4x4_neon(unsigned char *src_ptr, // secondpass_filter if (yoffset == 0) { // skip_2ndpass_filter - store4x4(dst_ptr, dst_pitch, e0, e1); + store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(e0, e1)); } else { uint8x8_t f0, f1; const uint8x8_t filter0 = vdup_n_u8(bifilter4_coeff[yoffset][0]); @@ -140,7 +112,7 @@ void vp8_bilinear_predict4x4_neon(unsigned char *src_ptr, f0 = vqrshrn_n_u16(b0, 7); f1 = vqrshrn_n_u16(b1, 7); - store4x4(dst_ptr, dst_pitch, f0, f1); + store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(f0, f1)); } } diff --git a/vp8/common/arm/neon/sixtappredict_neon.c b/vp8/common/arm/neon/sixtappredict_neon.c index fbb552ebe..aa2567df7 100644 --- a/vp8/common/arm/neon/sixtappredict_neon.c +++ b/vp8/common/arm/neon/sixtappredict_neon.c @@ -11,6 +11,7 @@ #include <arm_neon.h> #include <string.h> #include "./vpx_config.h" +#include "vpx_dsp/arm/mem_neon.h" #include "vpx_ports/mem.h" static const int8_t vp8_sub_pel_filters[8][8] = { @@ -42,35 +43,6 @@ static INLINE uint8x8_t load_and_shift(const unsigned char *a) { return vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(vld1_u8(a)), 32)); } -static INLINE void store4x4(unsigned char *dst, int dst_stride, - const uint8x8_t a0, const uint8x8_t a1) { - if (!((uintptr_t)dst & 0x3) && !(dst_stride & 0x3)) { - vst1_lane_u32((uint32_t *)dst, 
vreinterpret_u32_u8(a0), 0); - dst += dst_stride; - vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 1); - dst += dst_stride; - vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 0); - dst += dst_stride; - vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 1); - } else { - // Store to the aligned local buffer and memcpy instead of vget_lane_u8 - // which is really really slow. - uint32_t output_buffer[4]; - vst1_lane_u32(output_buffer, vreinterpret_u32_u8(a0), 0); - vst1_lane_u32(output_buffer + 1, vreinterpret_u32_u8(a0), 1); - vst1_lane_u32(output_buffer + 2, vreinterpret_u32_u8(a1), 0); - vst1_lane_u32(output_buffer + 3, vreinterpret_u32_u8(a1), 1); - - memcpy(dst, output_buffer, 4); - dst += dst_stride; - memcpy(dst, output_buffer + 1, 4); - dst += dst_stride; - memcpy(dst, output_buffer + 2, 4); - dst += dst_stride; - memcpy(dst, output_buffer + 3, 4); - } -} - static INLINE void filter_add_accumulate(const uint8x16_t a, const uint8x16_t b, const uint8x8_t filter, uint16x8_t *c, uint16x8_t *d) { @@ -180,7 +152,7 @@ static INLINE void yonly4x4(const unsigned char *src, int src_stride, e0 = vqrshrun_n_s16(d0, 7); e1 = vqrshrun_n_s16(d1, 7); - store4x4(dst, dst_stride, e0, e1); + store_unaligned_u8q(dst, dst_stride, vcombine_u8(e0, e1)); } void vp8_sixtap_predict4x4_neon(unsigned char *src_ptr, int src_pixels_per_line, @@ -297,7 +269,7 @@ void vp8_sixtap_predict4x4_neon(unsigned char *src_ptr, int src_pixels_per_line, b2 = vqrshrun_n_s16(e4567, 7); if (yoffset == 0) { // firstpass_filter4x4_only - store4x4(dst_ptr, dst_pitch, b0, b2); + store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(b0, b2)); return; } @@ -411,7 +383,7 @@ void vp8_sixtap_predict4x4_neon(unsigned char *src_ptr, int src_pixels_per_line, e0 = vqrshrun_n_s16(d0, 7); e1 = vqrshrun_n_s16(d1, 7); - store4x4(dst_ptr, dst_pitch, e0, e1); + store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(e0, e1)); } void vp8_sixtap_predict8x4_neon(unsigned char *src_ptr, int src_pixels_per_line, |