author    Salome Thirot <salome.thirot@arm.com>    2023-02-01 16:37:24 +0000
committer Salome Thirot <salome.thirot@arm.com>    2023-02-06 15:51:43 +0000
commit    e3028ddbb408381601ab8d2c67be37124a9726e5 (patch)
tree      13533af79d25d09c914030ba41f1ed26dcd78bba /vpx_dsp
parent    858a8c611f4c965078485860a6820e2135e6611b (diff)
Optimize Neon implementation of high bitdepth SAD functions
Optimizations take a similar form to those implemented for standard bitdepth SAD:

- Use ABD, UADALP instead of ABAL, ABAL2 (double the throughput on modern out-of-order Arm-designed cores).
- Use more accumulator registers to make better use of Neon pipeline resources on Arm CPUs that have four Neon pipes.

Change-Id: I9e626d7fa0e271908dc43448405a7985b80e6230
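For context (not part of the patch itself): `vabal_u16` compiles to ABAL/ABAL2, while the `vabdq_u16` + `vpadalq_u16` pair compiles to ABD + UADALP. A minimal sketch of the two equivalent per-row accumulation steps, using hypothetical helper names for illustration:

#include <arm_neon.h>

/* Before: widening absolute-difference-accumulate (ABAL, ABAL2).
 * Both instructions update the same accumulator, forming a serial
 * dependency chain through acc. */
static uint32x4_t sad_step_abal(uint32x4_t acc, uint16x8_t s, uint16x8_t r) {
  acc = vabal_u16(acc, vget_low_u16(s), vget_low_u16(r));
  acc = vabal_u16(acc, vget_high_u16(s), vget_high_u16(r));
  return acc;
}

/* After: absolute difference (ABD) then pairwise widening accumulate
 * (UADALP). Per the commit message, these have double the throughput of
 * ABAL/ABAL2 on modern out-of-order Arm-designed cores. */
static uint32x4_t sad_step_abd_uadalp(uint32x4_t acc, uint16x8_t s,
                                      uint16x8_t r) {
  return vpadalq_u16(acc, vabdq_u16(s, r));
}

The second optimization is visible in highbd_sadwxh_neon in the diff below: four independent sum[] accumulators are carried through the loop and only combined after it, so accumulation is not serialized through a single register on cores with four Neon pipes.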
Diffstat (limited to 'vpx_dsp')
-rw-r--r--  vpx_dsp/arm/highbd_sad_neon.c | 209
1 file changed, 149 insertions(+), 60 deletions(-)
diff --git a/vpx_dsp/arm/highbd_sad_neon.c b/vpx_dsp/arm/highbd_sad_neon.c
index ecb52ce5a..8415481f0 100644
--- a/vpx_dsp/arm/highbd_sad_neon.c
+++ b/vpx_dsp/arm/highbd_sad_neon.c
@@ -17,53 +17,169 @@
#include "vpx_dsp/arm/mem_neon.h"
#include "vpx_dsp/arm/sum_neon.h"
-static VPX_FORCE_INLINE uint32_t highbd_sad4_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *ref_ptr,
- int ref_stride, int width,
- int height) {
- int i, j;
- uint32x4_t sum_abs_diff = vdupq_n_u32(0);
+static INLINE uint32_t highbd_sad4xh_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h) {
const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
const uint16_t *ref16_ptr = CONVERT_TO_SHORTPTR(ref_ptr);
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 4) {
- const uint16x4_t src_u16 = vld1_u16(src16_ptr + j);
- const uint16x4_t ref_u16 = vld1_u16(ref16_ptr + j);
- sum_abs_diff = vabal_u16(sum_abs_diff, src_u16, ref_u16);
- }
+ uint32x4_t sum = vdupq_n_u32(0);
+
+ int i = h;
+ do {
+ uint16x4_t s = vld1_u16(src16_ptr);
+ uint16x4_t r = vld1_u16(ref16_ptr);
+ sum = vabal_u16(sum, s, r);
+
src16_ptr += src_stride;
ref16_ptr += ref_stride;
- }
+ } while (--i != 0);
- return horizontal_add_uint32x4(sum_abs_diff);
+ return horizontal_add_uint32x4(sum);
}
-static VPX_FORCE_INLINE uint32_t highbd_sad8_neon(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *ref_ptr,
- int ref_stride, int width,
- int height) {
- int i, j;
- uint32x4_t sum_abs_diff = vdupq_n_u32(0);
+static INLINE uint32_t highbd_sad8xh_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h) {
const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
const uint16_t *ref16_ptr = CONVERT_TO_SHORTPTR(ref_ptr);
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 8) {
- const uint16x8_t src_u16 = vld1q_u16(src16_ptr + j);
- const uint16x8_t ref_u16 = vld1q_u16(ref16_ptr + j);
- sum_abs_diff =
- vabal_u16(sum_abs_diff, vget_low_u16(src_u16), vget_low_u16(ref_u16));
- sum_abs_diff = vabal_u16(sum_abs_diff, vget_high_u16(src_u16),
- vget_high_u16(ref_u16));
- }
+ uint32x4_t sum = vdupq_n_u32(0);
+
+ int i = h;
+ do {
+ uint16x8_t s = vld1q_u16(src16_ptr);
+ uint16x8_t r = vld1q_u16(ref16_ptr);
+ uint16x8_t diff = vabdq_u16(s, r);
+ sum = vpadalq_u16(sum, diff);
+
src16_ptr += src_stride;
ref16_ptr += ref_stride;
- }
+ } while (--i != 0);
- return horizontal_add_uint32x4(sum_abs_diff);
+ return horizontal_add_uint32x4(sum);
+}
+
+static INLINE uint32_t highbd_sad16xh_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h) {
+ const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
+ const uint16_t *ref16_ptr = CONVERT_TO_SHORTPTR(ref_ptr);
+ uint32x4_t sum[2] = { vdupq_n_u32(0), vdupq_n_u32(0) };
+
+ int i = h;
+ do {
+ uint16x8_t s0, s1, r0, r1;
+ uint16x8_t diff0, diff1;
+
+ s0 = vld1q_u16(src16_ptr);
+ r0 = vld1q_u16(ref16_ptr);
+ diff0 = vabdq_u16(s0, r0);
+ sum[0] = vpadalq_u16(sum[0], diff0);
+
+ s1 = vld1q_u16(src16_ptr + 8);
+ r1 = vld1q_u16(ref16_ptr + 8);
+ diff1 = vabdq_u16(s1, r1);
+ sum[1] = vpadalq_u16(sum[1], diff1);
+
+ src16_ptr += src_stride;
+ ref16_ptr += ref_stride;
+ } while (--i != 0);
+
+ sum[0] = vaddq_u32(sum[0], sum[1]);
+ return horizontal_add_uint32x4(sum[0]);
}
+static INLINE uint32_t highbd_sadwxh_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int w, int h) {
+ const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
+ const uint16_t *ref16_ptr = CONVERT_TO_SHORTPTR(ref_ptr);
+ uint32x4_t sum[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0),
+ vdupq_n_u32(0) };
+
+ int i = h;
+ do {
+ int j = 0;
+ do {
+ uint16x8_t s0, s1, s2, s3, r0, r1, r2, r3;
+ uint16x8_t diff0, diff1, diff2, diff3;
+
+ s0 = vld1q_u16(src16_ptr + j);
+ r0 = vld1q_u16(ref16_ptr + j);
+ diff0 = vabdq_u16(s0, r0);
+ sum[0] = vpadalq_u16(sum[0], diff0);
+
+ s1 = vld1q_u16(src16_ptr + j + 8);
+ r1 = vld1q_u16(ref16_ptr + j + 8);
+ diff1 = vabdq_u16(s1, r1);
+ sum[1] = vpadalq_u16(sum[1], diff1);
+
+ s2 = vld1q_u16(src16_ptr + j + 16);
+ r2 = vld1q_u16(ref16_ptr + j + 16);
+ diff2 = vabdq_u16(s2, r2);
+ sum[2] = vpadalq_u16(sum[2], diff2);
+
+ s3 = vld1q_u16(src16_ptr + j + 24);
+ r3 = vld1q_u16(ref16_ptr + j + 24);
+ diff3 = vabdq_u16(s3, r3);
+ sum[3] = vpadalq_u16(sum[3], diff3);
+
+ j += 32;
+ } while (j < w);
+
+ src16_ptr += src_stride;
+ ref16_ptr += ref_stride;
+ } while (--i != 0);
+
+ sum[0] = vaddq_u32(sum[0], sum[1]);
+ sum[2] = vaddq_u32(sum[2], sum[3]);
+ sum[0] = vaddq_u32(sum[0], sum[2]);
+
+ return horizontal_add_uint32x4(sum[0]);
+}
+
+static INLINE unsigned int highbd_sad64xh_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h) {
+ return highbd_sadwxh_neon(src_ptr, src_stride, ref_ptr, ref_stride, 64, h);
+}
+
+static INLINE unsigned int highbd_sad32xh_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h) {
+ return highbd_sadwxh_neon(src_ptr, src_stride, ref_ptr, ref_stride, 32, h);
+}
+
+#define HBD_SAD_WXH_NEON(w, h) \
+ unsigned int vpx_highbd_sad##w##x##h##_neon( \
+ const uint8_t *src, int src_stride, const uint8_t *ref, \
+ int ref_stride) { \
+ return highbd_sad##w##xh_neon(src, src_stride, ref, ref_stride, (h)); \
+ }
+
+HBD_SAD_WXH_NEON(4, 4)
+HBD_SAD_WXH_NEON(4, 8)
+
+HBD_SAD_WXH_NEON(8, 4)
+HBD_SAD_WXH_NEON(8, 8)
+HBD_SAD_WXH_NEON(8, 16)
+
+HBD_SAD_WXH_NEON(16, 8)
+HBD_SAD_WXH_NEON(16, 16)
+HBD_SAD_WXH_NEON(16, 32)
+
+HBD_SAD_WXH_NEON(32, 16)
+HBD_SAD_WXH_NEON(32, 32)
+HBD_SAD_WXH_NEON(32, 64)
+
+HBD_SAD_WXH_NEON(64, 32)
+HBD_SAD_WXH_NEON(64, 64)
+
static VPX_FORCE_INLINE uint32_t highbd_sad4_avg_neon(
const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,
int ref_stride, const uint8_t *second_pred, int width, int height) {
@@ -115,20 +231,6 @@ static VPX_FORCE_INLINE uint32_t highbd_sad8_avg_neon(
return horizontal_add_uint32x4(sum_abs_diff);
}
-#define highbd_sad4MxN(m, n) \
- unsigned int vpx_highbd_sad##m##x##n##_neon( \
- const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
- int ref_stride) { \
- return highbd_sad4_neon(src_ptr, src_stride, ref_ptr, ref_stride, m, n); \
- }
-
-#define highbd_sadMxN(m, n) \
- unsigned int vpx_highbd_sad##m##x##n##_neon( \
- const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
- int ref_stride) { \
- return highbd_sad8_neon(src_ptr, src_stride, ref_ptr, ref_stride, m, n); \
- }
-
#define highbd_sad4MxN_avg(m, n) \
unsigned int vpx_highbd_sad##m##x##n##_avg_neon( \
const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
@@ -159,67 +261,54 @@ static VPX_FORCE_INLINE uint32_t highbd_sad8_avg_neon(
/* clang-format off */
// 4x4
-highbd_sad4MxN(4, 4)
highbd_sad4MxN_avg(4, 4)
highbd_sadMxNx4D(4, 4)
// 4x8
-highbd_sad4MxN(4, 8)
highbd_sad4MxN_avg(4, 8)
highbd_sadMxNx4D(4, 8)
// 8x4
-highbd_sadMxN(8, 4)
highbd_sadMxN_avg(8, 4)
highbd_sadMxNx4D(8, 4)
// 8x8
-highbd_sadMxN(8, 8)
highbd_sadMxN_avg(8, 8)
highbd_sadMxNx4D(8, 8)
// 8x16
-highbd_sadMxN(8, 16)
highbd_sadMxN_avg(8, 16)
highbd_sadMxNx4D(8, 16)
// 16x8
-highbd_sadMxN(16, 8)
highbd_sadMxN_avg(16, 8)
highbd_sadMxNx4D(16, 8)
// 16x16
-highbd_sadMxN(16, 16)
highbd_sadMxN_avg(16, 16)
highbd_sadMxNx4D(16, 16)
// 16x32
-highbd_sadMxN(16, 32)
highbd_sadMxN_avg(16, 32)
highbd_sadMxNx4D(16, 32)
// 32x16
-highbd_sadMxN(32, 16)
highbd_sadMxN_avg(32, 16)
highbd_sadMxNx4D(32, 16)
// 32x32
-highbd_sadMxN(32, 32)
highbd_sadMxN_avg(32, 32)
highbd_sadMxNx4D(32, 32)
// 32x64
-highbd_sadMxN(32, 64)
highbd_sadMxN_avg(32, 64)
highbd_sadMxNx4D(32, 64)
// 64x32
-highbd_sadMxN(64, 32)
highbd_sadMxN_avg(64, 32)
highbd_sadMxNx4D(64, 32)
// 64x64
-highbd_sadMxN(64, 64)
highbd_sadMxN_avg(64, 64)
highbd_sadMxNx4D(64, 64)
/* clang-format on */