author    Frank Galligan <fgalligan@google.com>  2015-01-27 23:01:44 -0800
committer Gerrit Code Review <gerrit@gerrit.golo.chromium.org>  2015-01-27 23:01:44 -0800
commit    eb12d880ab3b25a3839a7aa03ba0b24aea3023f1
tree      de4e72aa7e870749a4c54865560c2a7461b5ad06
parent    80a3a0792940bc10f4b6ce97fcc7f9de15103d35
parent    9f574d03168ae4a25dadf315099fa0296412bdd2
Merge "Add vp9_sad16x16x4d_neon Neon intrinsic function."
 test/sad_test.cc                      |  2 ++
 vp9/common/vp9_rtcd_defs.pl           |  2 +-
 vp9/encoder/arm/neon/vp9_sad4d_neon.c | 55 ++++++++++++++++++++++++++++++++++
 3 files changed, 58 insertions(+), 1 deletion(-)
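The x4d SAD functions compute the sum of absolute differences between one source block and four candidate reference blocks in a single call, which lets the motion search score several candidates per invocation. As a rough scalar sketch of what the new 16x16 NEON routine computes (the name sad16x16x4d_scalar_sketch and this standalone layout are illustrative, not part of the change):

#include <stdint.h>
#include <stdlib.h>

static void sad16x16x4d_scalar_sketch(const uint8_t *src, int src_stride,
                                      const uint8_t *const ref[4],
                                      int ref_stride, unsigned int *res) {
  int k, i, j;
  for (k = 0; k < 4; ++k) {
    const uint8_t *s = src;
    const uint8_t *r = ref[k];
    unsigned int sad = 0;
    for (i = 0; i < 16; ++i) {
      for (j = 0; j < 16; ++j)
        sad += (unsigned int)abs(s[j] - r[j]);
      s += src_stride;
      r += ref_stride;
    }
    res[k] = sad;  /* one SAD per candidate reference block */
  }
}

The intrinsic version added below produces the same four totals, but consumes a whole 16-byte row per load and keeps the per-lane partial sums in uint16x8_t accumulators.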
diff --git a/test/sad_test.cc b/test/sad_test.cc
index 9461ebb53..780f425e0 100644
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -1244,8 +1244,10 @@ INSTANTIATE_TEST_CASE_P(AVX2, SADx4Test, ::testing::Values(
 #endif  // HAVE_AVX2
 
 #if HAVE_NEON
+const SadMxNx4Func sad_16x16x4d_neon = vp9_sad16x16x4d_neon;
 const SadMxNx4Func sad_64x64x4d_neon = vp9_sad64x64x4d_neon;
 INSTANTIATE_TEST_CASE_P(NEON, SADx4Test, ::testing::Values(
+                        make_tuple(16, 16, sad_16x16x4d_neon, -1),
                         make_tuple(64, 64, sad_64x64x4d_neon, -1)));
 #endif  // HAVE_NEON
 #endif  // CONFIG_VP9_ENCODER
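This registers the new function with the parameterized SADx4Test, so it is exercised the same way as the existing 64x64 NEON case. For a standalone illustration of the call itself on an ARM build with NEON (the buffers, strides, and expected value here are made up for the example; the prototype matches the add_proto line below):

#include <assert.h>
#include <stdint.h>
#include <string.h>

void vp9_sad16x16x4d_neon(const uint8_t *src, int src_stride,
                          const uint8_t *const ref[4], int ref_stride,
                          unsigned int *res);

static void call_example(void) {
  uint8_t src[16 * 16], ref_buf[4][16 * 16];
  const uint8_t *ref[4] = { ref_buf[0], ref_buf[1], ref_buf[2], ref_buf[3] };
  unsigned int sad[4];
  int k;
  memset(src, 0, sizeof(src));            /* source block: all 0    */
  memset(ref_buf, 255, sizeof(ref_buf));  /* references: all 255    */
  vp9_sad16x16x4d_neon(src, 16, ref, 16, sad);
  for (k = 0; k < 4; ++k)
    assert(sad[k] == 255u * 256u);        /* 256 pixels, each off by 255 */
}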
diff --git a/vp9/common/vp9_rtcd_defs.pl b/vp9/common/vp9_rtcd_defs.pl
index 106adc6fe..271b55ab4 100644
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -1067,7 +1067,7 @@ add_proto qw/void vp9_sad32x32x4d/, "const uint8_t *src_ptr, int src_stride, co
 specialize qw/vp9_sad32x32x4d sse2 avx2/;
 
 add_proto qw/void vp9_sad16x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array";
-specialize qw/vp9_sad16x16x4d sse2/;
+specialize qw/vp9_sad16x16x4d sse2 neon/;
 
 add_proto qw/void vp9_sad16x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array";
 specialize qw/vp9_sad16x8x4d sse2/;
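In vp9_rtcd_defs.pl, the specialize line lists which optimized variants the run-time CPU detection (RTCD) layer may select for vp9_sad16x16x4d; adding neon lets the generated dispatch install the NEON version on targets that support it. Conceptually the generated glue behaves like the sketch below (the have_neon flag and setup function are illustrative; the real code is generated at build time and differs in detail):

#include <stdint.h>

void vp9_sad16x16x4d_c(const uint8_t *src_ptr, int src_stride,
                       const uint8_t *const ref_ptr[], int ref_stride,
                       unsigned int *sad_array);
void vp9_sad16x16x4d_neon(const uint8_t *src_ptr, int src_stride,
                          const uint8_t *const ref_ptr[], int ref_stride,
                          unsigned int *sad_array);

/* Function pointer used by the encoder; starts at the C fallback. */
void (*vp9_sad16x16x4d)(const uint8_t *src_ptr, int src_stride,
                        const uint8_t *const ref_ptr[], int ref_stride,
                        unsigned int *sad_array) = vp9_sad16x16x4d_c;

static void setup_rtcd_sketch(int have_neon) {
  if (have_neon)
    vp9_sad16x16x4d = vp9_sad16x16x4d_neon;  /* prefer the NEON version */
}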
diff --git a/vp9/encoder/arm/neon/vp9_sad4d_neon.c b/vp9/encoder/arm/neon/vp9_sad4d_neon.c
index ec71bcb0f..e77852cbc 100644
--- a/vp9/encoder/arm/neon/vp9_sad4d_neon.c
+++ b/vp9/encoder/arm/neon/vp9_sad4d_neon.c
@@ -105,3 +105,58 @@ void vp9_sad64x64x4d_neon(const uint8_t *src, int src_stride,
   res[2] = horizontal_long_add_16x8(vec_sum_ref2_lo, vec_sum_ref2_hi);
   res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
 }
+
+void vp9_sad16x16x4d_neon(const uint8_t *src, int src_stride,
+                          const uint8_t* const ref[4], int ref_stride,
+                          unsigned int *res) {
+  int i;
+  uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0);
+  uint16x8_t vec_sum_ref0_hi = vdupq_n_u16(0);
+  uint16x8_t vec_sum_ref1_lo = vdupq_n_u16(0);
+  uint16x8_t vec_sum_ref1_hi = vdupq_n_u16(0);
+  uint16x8_t vec_sum_ref2_lo = vdupq_n_u16(0);
+  uint16x8_t vec_sum_ref2_hi = vdupq_n_u16(0);
+  uint16x8_t vec_sum_ref3_lo = vdupq_n_u16(0);
+  uint16x8_t vec_sum_ref3_hi = vdupq_n_u16(0);
+  const uint8_t *ref0, *ref1, *ref2, *ref3;
+  ref0 = ref[0];
+  ref1 = ref[1];
+  ref2 = ref[2];
+  ref3 = ref[3];
+
+  for (i = 0; i < 16; ++i) {
+    const uint8x16_t vec_src = vld1q_u8(src);
+    const uint8x16_t vec_ref0 = vld1q_u8(ref0);
+    const uint8x16_t vec_ref1 = vld1q_u8(ref1);
+    const uint8x16_t vec_ref2 = vld1q_u8(ref2);
+    const uint8x16_t vec_ref3 = vld1q_u8(ref3);
+
+    vec_sum_ref0_lo = vabal_u8(vec_sum_ref0_lo, vget_low_u8(vec_src),
+                               vget_low_u8(vec_ref0));
+    vec_sum_ref0_hi = vabal_u8(vec_sum_ref0_hi, vget_high_u8(vec_src),
+                               vget_high_u8(vec_ref0));
+    vec_sum_ref1_lo = vabal_u8(vec_sum_ref1_lo, vget_low_u8(vec_src),
+                               vget_low_u8(vec_ref1));
+    vec_sum_ref1_hi = vabal_u8(vec_sum_ref1_hi, vget_high_u8(vec_src),
+                               vget_high_u8(vec_ref1));
+    vec_sum_ref2_lo = vabal_u8(vec_sum_ref2_lo, vget_low_u8(vec_src),
+                               vget_low_u8(vec_ref2));
+    vec_sum_ref2_hi = vabal_u8(vec_sum_ref2_hi, vget_high_u8(vec_src),
+                               vget_high_u8(vec_ref2));
+    vec_sum_ref3_lo = vabal_u8(vec_sum_ref3_lo, vget_low_u8(vec_src),
+                               vget_low_u8(vec_ref3));
+    vec_sum_ref3_hi = vabal_u8(vec_sum_ref3_hi, vget_high_u8(vec_src),
+                               vget_high_u8(vec_ref3));
+
+    src += src_stride;
+    ref0 += ref_stride;
+    ref1 += ref_stride;
+    ref2 += ref_stride;
+    ref3 += ref_stride;
+  }
+
+  res[0] = horizontal_long_add_16x8(vec_sum_ref0_lo, vec_sum_ref0_hi);
+  res[1] = horizontal_long_add_16x8(vec_sum_ref1_lo, vec_sum_ref1_hi);
+  res[2] = horizontal_long_add_16x8(vec_sum_ref2_lo, vec_sum_ref2_hi);
+  res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
+}
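horizontal_long_add_16x8 is defined earlier in vp9_sad4d_neon.c, outside this hunk; it reduces the two uint16x8_t accumulators to a single 32-bit total. The 16-bit accumulators have enough headroom here: each lane sums at most 16 absolute differences of at most 255, i.e. 4080, so they cannot overflow for a 16x16 block. A plausible reduction is sketched below under that assumption (the _sketch name is not the library helper, only an illustration of the widening pairwise-add pattern):

#include <arm_neon.h>

static unsigned int horizontal_long_add_16x8_sketch(const uint16x8_t vec_lo,
                                                    const uint16x8_t vec_hi) {
  /* Widen each uint16x8_t to four 32-bit pairwise sums, then add across. */
  const uint32x4_t a = vpaddlq_u16(vec_lo);
  const uint32x4_t b = vpaddlq_u16(vec_hi);
  const uint32x4_t c = vaddq_u32(a, b);
  const uint64x2_t d = vpaddlq_u32(c);
  return (unsigned int)(vgetq_lane_u64(d, 0) + vgetq_lane_u64(d, 1));
}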