diff options
author | Scott LaVarnway <slavarnway@google.com> | 2014-08-01 06:36:18 -0700 |
---|---|---|
committer | Scott LaVarnway <slavarnway@google.com> | 2014-08-01 06:36:18 -0700 |
commit | 545be78136513794ab2d9d84a644b5305817d332 (patch) | |
tree | dbcc0e24346dc656fc1e9215c448b841565ed20f | |
parent | 3249f26ff85e2bfe148167ce80df53643a89a2d2 (diff) | |
download | libvpx-545be78136513794ab2d9d84a644b5305817d332.tar libvpx-545be78136513794ab2d9d84a644b5305817d332.tar.gz libvpx-545be78136513794ab2d9d84a644b5305817d332.tar.bz2 libvpx-545be78136513794ab2d9d84a644b5305817d332.zip |
Added vp9_sad8x8_neon()
Change-Id: I3be8911121ef9a5f39f6c1a2e28f9e00972e0624
-rw-r--r-- | test/sad_test.cc | 2 | ||||
-rw-r--r-- | vp9/common/vp9_rtcd_defs.pl | 2 | ||||
-rw-r--r-- | vp9/encoder/arm/neon/vp9_sad_neon.c | 24 |
3 files changed, 22 insertions, 6 deletions
diff --git a/test/sad_test.cc b/test/sad_test.cc index dbd2cf56b..f07a98921 100644 --- a/test/sad_test.cc +++ b/test/sad_test.cc @@ -475,10 +475,12 @@ INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::Values( const SadMxNVp9Func sad_64x64_neon_vp9 = vp9_sad64x64_neon; const SadMxNVp9Func sad_32x32_neon_vp9 = vp9_sad32x32_neon; const SadMxNVp9Func sad_16x16_neon_vp9 = vp9_sad16x16_neon; +const SadMxNVp9Func sad_8x8_neon_vp9 = vp9_sad8x8_neon; const SadMxNVp9Param neon_vp9_tests[] = { make_tuple(64, 64, sad_64x64_neon_vp9), make_tuple(32, 32, sad_32x32_neon_vp9), make_tuple(16, 16, sad_16x16_neon_vp9), + make_tuple(8, 8, sad_8x8_neon_vp9), }; INSTANTIATE_TEST_CASE_P(NEON, SADVP9Test, ::testing::ValuesIn(neon_vp9_tests)); #endif // CONFIG_VP9_ENCODER diff --git a/vp9/common/vp9_rtcd_defs.pl b/vp9/common/vp9_rtcd_defs.pl index 3b1ca161d..41f867b33 100644 --- a/vp9/common/vp9_rtcd_defs.pl +++ b/vp9/common/vp9_rtcd_defs.pl @@ -554,7 +554,7 @@ add_proto qw/unsigned int vp9_sad8x16/, "const uint8_t *src_ptr, int source_stri specialize qw/vp9_sad8x16 mmx/, "$sse2_x86inc"; add_proto qw/unsigned int vp9_sad8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride"; -specialize qw/vp9_sad8x8 mmx/, "$sse2_x86inc"; +specialize qw/vp9_sad8x8 mmx neon/, "$sse2_x86inc"; add_proto qw/unsigned int vp9_sad8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride"; specialize qw/vp9_sad8x4/, "$sse2_x86inc"; diff --git a/vp9/encoder/arm/neon/vp9_sad_neon.c b/vp9/encoder/arm/neon/vp9_sad_neon.c index fe40b5452..c4cd85680 100644 --- a/vp9/encoder/arm/neon/vp9_sad_neon.c +++ b/vp9/encoder/arm/neon/vp9_sad_neon.c @@ -26,9 +26,8 @@ static INLINE unsigned int horizontal_long_add_16x8(const uint16x8_t vec_lo, vreinterpret_u32_u64(vget_high_u64(b))); return vget_lane_u32(c, 0); } -static INLINE unsigned int horizontal_add_16x8(const uint16x8_t vec_lo, - const uint16x8_t vec_hi) { - const uint32x4_t a = vpaddlq_u16(vaddq_u16(vec_lo, vec_hi)); +static INLINE unsigned int horizontal_add_16x8(const uint16x8_t vec_16x8) { + const uint32x4_t a = vpaddlq_u16(vec_16x8); const uint64x2_t b = vpaddlq_u32(a); const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), vreinterpret_u32_u64(vget_high_u64(b))); @@ -93,7 +92,7 @@ unsigned int vp9_sad32x32_neon(const uint8_t *src, int src_stride, vec_accum_hi = vabal_u8(vec_accum_hi, vget_high_u8(vec_src_16), vget_high_u8(vec_ref_16)); } - return horizontal_add_16x8(vec_accum_lo, vec_accum_hi); + return horizontal_add_16x8(vaddq_u16(vec_accum_lo, vec_accum_hi)); } unsigned int vp9_sad16x16_neon(const uint8_t *src, int src_stride, @@ -112,5 +111,20 @@ unsigned int vp9_sad16x16_neon(const uint8_t *src, int src_stride, vec_accum_hi = vabal_u8(vec_accum_hi, vget_high_u8(vec_src), vget_high_u8(vec_ref)); } - return horizontal_add_16x8(vec_accum_lo, vec_accum_hi); + return horizontal_add_16x8(vaddq_u16(vec_accum_lo, vec_accum_hi)); +} + +unsigned int vp9_sad8x8_neon(const uint8_t *src, int src_stride, + const uint8_t *ref, int ref_stride) { + int i; + uint16x8_t vec_accum = vdupq_n_u16(0); + + for (i = 0; i < 8; ++i) { + const uint8x8_t vec_src = vld1_u8(src); + const uint8x8_t vec_ref = vld1_u8(ref); + src += src_stride; + ref += ref_stride; + vec_accum = vabal_u8(vec_accum, vec_src, vec_ref); + } + return horizontal_add_16x8(vec_accum); } |