author     Salome Thirot <salome.thirot@arm.com>    2023-02-03 11:00:19 +0000
committer  Salome Thirot <salome.thirot@arm.com>    2023-02-06 15:54:57 +0000
commit     9a5cbfbc087210eabfac5b0c2d72d12852ac56ae
tree       f9df00cbd19bd15268456ba81bc1db1d4afc4fa6
parent     e3028ddbb408381601ab8d2c67be37124a9726e5
Optimize Neon implementation of high bitdepth avg SAD functions
Optimizations take a similar form to those implemented for standard
bitdepth averaging SAD:

- Use ABD, UADALP instead of ABAL, ABAL2 (double the throughput on
  modern out-of-order Arm-designed cores).
- Use more accumulator registers to make better use of Neon pipeline
  resources on Arm CPUs that have four Neon pipes.

Change-Id: I75c5f09948f6bf17200f82e00e7a827a80451108
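As an illustration of the first point, here is a minimal sketch of the
instruction-pattern change for one 8-lane row, using the same arm_neon.h
intrinsics as the patch below (the helper names are hypothetical, chosen
only for this comparison):

#include <arm_neon.h>

/* Old pattern: ABAL + ABAL2. Both widening absolute-difference
 * accumulates write the same accumulator register, so they serialize. */
static inline uint32x4_t sad_u16x8_abal(uint32x4_t acc, uint16x8_t s,
                                        uint16x8_t avg) {
  acc = vabal_u16(acc, vget_low_u16(s), vget_low_u16(avg));   /* ABAL  */
  acc = vabal_u16(acc, vget_high_u16(s), vget_high_u16(avg)); /* ABAL2 */
  return acc;
}

/* New pattern: ABD + UADALP. The absolute difference does not depend on
 * the accumulator, so per the commit message it achieves double the
 * throughput on modern out-of-order Arm-designed cores. */
static inline uint32x4_t sad_u16x8_abd_uadalp(uint32x4_t acc, uint16x8_t s,
                                              uint16x8_t avg) {
  uint16x8_t diff = vabdq_u16(s, avg); /* ABD    */
  return vpadalq_u16(acc, diff);       /* UADALP */
}

The second point is visible in highbd_sadwxh_avg_neon below, which spreads
the accumulation across four uint32x4_t registers (sum[0]..sum[3]) and only
merges them after the loop.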
Diffstat (limited to 'vpx_dsp')
-rw-r--r--  vpx_dsp/arm/highbd_sad_neon.c | 244
1 file changed, 181 insertions(+), 63 deletions(-)
diff --git a/vpx_dsp/arm/highbd_sad_neon.c b/vpx_dsp/arm/highbd_sad_neon.c
index 8415481f0..c76eb12b9 100644
--- a/vpx_dsp/arm/highbd_sad_neon.c
+++ b/vpx_dsp/arm/highbd_sad_neon.c
@@ -180,73 +180,204 @@ HBD_SAD_WXH_NEON(32, 64)
HBD_SAD_WXH_NEON(64, 32)
HBD_SAD_WXH_NEON(64, 64)
-static VPX_FORCE_INLINE uint32_t highbd_sad4_avg_neon(
- const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,
- int ref_stride, const uint8_t *second_pred, int width, int height) {
- int i, j;
- uint32x4_t sum_abs_diff = vdupq_n_u32(0);
+static INLINE uint32_t highbd_sad4xh_avg_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h,
+ const uint8_t *second_pred) {
const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
const uint16_t *ref16_ptr = CONVERT_TO_SHORTPTR(ref_ptr);
- const uint16_t *pred_ptr = CONVERT_TO_SHORTPTR(second_pred);
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 4) {
- const uint16x4_t a_u16 = vld1_u16(src16_ptr + j);
- const uint16x4_t b_u16 = vld1_u16(ref16_ptr + j);
- const uint16x4_t c_u16 = vld1_u16(pred_ptr + j);
- const uint16x4_t avg = vrhadd_u16(b_u16, c_u16);
- sum_abs_diff = vabal_u16(sum_abs_diff, a_u16, avg);
- }
+ const uint16_t *pred16_ptr = CONVERT_TO_SHORTPTR(second_pred);
+ uint32x4_t sum = vdupq_n_u32(0);
+
+ int i = h;
+ do {
+ uint16x4_t s = vld1_u16(src16_ptr);
+ uint16x4_t r = vld1_u16(ref16_ptr);
+ uint16x4_t p = vld1_u16(pred16_ptr);
+
+ uint16x4_t avg = vrhadd_u16(r, p);
+ sum = vabal_u16(sum, s, avg);
+
src16_ptr += src_stride;
ref16_ptr += ref_stride;
- pred_ptr += width;
- }
+ pred16_ptr += 4;
+ } while (--i != 0);
- return horizontal_add_uint32x4(sum_abs_diff);
+ return horizontal_add_uint32x4(sum);
}
-static VPX_FORCE_INLINE uint32_t highbd_sad8_avg_neon(
- const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,
- int ref_stride, const uint8_t *second_pred, int width, int height) {
- int i, j;
- uint32x4_t sum_abs_diff = vdupq_n_u32(0);
+static INLINE uint32_t highbd_sad8xh_avg_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h,
+ const uint8_t *second_pred) {
const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
const uint16_t *ref16_ptr = CONVERT_TO_SHORTPTR(ref_ptr);
- const uint16_t *pred_ptr = CONVERT_TO_SHORTPTR(second_pred);
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 8) {
- const uint16x8_t a_u16 = vld1q_u16(src16_ptr + j);
- const uint16x8_t b_u16 = vld1q_u16(ref16_ptr + j);
- const uint16x8_t c_u16 = vld1q_u16(pred_ptr + j);
- const uint16x8_t avg = vrhaddq_u16(b_u16, c_u16);
- sum_abs_diff =
- vabal_u16(sum_abs_diff, vget_low_u16(a_u16), vget_low_u16(avg));
- sum_abs_diff =
- vabal_u16(sum_abs_diff, vget_high_u16(a_u16), vget_high_u16(avg));
- }
+ const uint16_t *pred16_ptr = CONVERT_TO_SHORTPTR(second_pred);
+ uint32x4_t sum = vdupq_n_u32(0);
+
+ int i = h;
+ do {
+ uint16x8_t s = vld1q_u16(src16_ptr);
+ uint16x8_t r = vld1q_u16(ref16_ptr);
+ uint16x8_t p = vld1q_u16(pred16_ptr);
+
+ uint16x8_t avg = vrhaddq_u16(r, p);
+ uint16x8_t diff = vabdq_u16(s, avg);
+ sum = vpadalq_u16(sum, diff);
+
src16_ptr += src_stride;
ref16_ptr += ref_stride;
- pred_ptr += width;
- }
+ pred16_ptr += 8;
+ } while (--i != 0);
- return horizontal_add_uint32x4(sum_abs_diff);
+ return horizontal_add_uint32x4(sum);
}
-#define highbd_sad4MxN_avg(m, n) \
- unsigned int vpx_highbd_sad##m##x##n##_avg_neon( \
- const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
- int ref_stride, const uint8_t *second_pred) { \
- return highbd_sad4_avg_neon(src_ptr, src_stride, ref_ptr, ref_stride, \
- second_pred, m, n); \
- }
+static INLINE uint32_t highbd_sad16xh_avg_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h,
+ const uint8_t *second_pred) {
+ const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
+ const uint16_t *ref16_ptr = CONVERT_TO_SHORTPTR(ref_ptr);
+ const uint16_t *pred16_ptr = CONVERT_TO_SHORTPTR(second_pred);
+ uint32x4_t sum[2] = { vdupq_n_u32(0), vdupq_n_u32(0) };
+
+ int i = h;
+ do {
+ uint16x8_t s0, s1, r0, r1, p0, p1;
+ uint16x8_t avg0, avg1, diff0, diff1;
+
+ s0 = vld1q_u16(src16_ptr);
+ r0 = vld1q_u16(ref16_ptr);
+ p0 = vld1q_u16(pred16_ptr);
+ avg0 = vrhaddq_u16(r0, p0);
+ diff0 = vabdq_u16(s0, avg0);
+ sum[0] = vpadalq_u16(sum[0], diff0);
+
+ s1 = vld1q_u16(src16_ptr + 8);
+ r1 = vld1q_u16(ref16_ptr + 8);
+ p1 = vld1q_u16(pred16_ptr + 8);
+ avg1 = vrhaddq_u16(r1, p1);
+ diff1 = vabdq_u16(s1, avg1);
+ sum[1] = vpadalq_u16(sum[1], diff1);
+
+ src16_ptr += src_stride;
+ ref16_ptr += ref_stride;
+ pred16_ptr += 16;
+ } while (--i != 0);
+
+ sum[0] = vaddq_u32(sum[0], sum[1]);
+ return horizontal_add_uint32x4(sum[0]);
+}
-#define highbd_sadMxN_avg(m, n) \
- unsigned int vpx_highbd_sad##m##x##n##_avg_neon( \
- const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
- int ref_stride, const uint8_t *second_pred) { \
- return highbd_sad8_avg_neon(src_ptr, src_stride, ref_ptr, ref_stride, \
- second_pred, m, n); \
+static INLINE uint32_t highbd_sadwxh_avg_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int w, int h,
+ const uint8_t *second_pred) {
+ const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
+ const uint16_t *ref16_ptr = CONVERT_TO_SHORTPTR(ref_ptr);
+ const uint16_t *pred16_ptr = CONVERT_TO_SHORTPTR(second_pred);
+ uint32x4_t sum[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0),
+ vdupq_n_u32(0) };
+
+ int i = h;
+ do {
+ int j = 0;
+ do {
+ uint16x8_t s0, s1, s2, s3, r0, r1, r2, r3, p0, p1, p2, p3;
+ uint16x8_t avg0, avg1, avg2, avg3, diff0, diff1, diff2, diff3;
+
+ s0 = vld1q_u16(src16_ptr + j);
+ r0 = vld1q_u16(ref16_ptr + j);
+ p0 = vld1q_u16(pred16_ptr + j);
+ avg0 = vrhaddq_u16(r0, p0);
+ diff0 = vabdq_u16(s0, avg0);
+ sum[0] = vpadalq_u16(sum[0], diff0);
+
+ s1 = vld1q_u16(src16_ptr + j + 8);
+ r1 = vld1q_u16(ref16_ptr + j + 8);
+ p1 = vld1q_u16(pred16_ptr + j + 8);
+ avg1 = vrhaddq_u16(r1, p1);
+ diff1 = vabdq_u16(s1, avg1);
+ sum[1] = vpadalq_u16(sum[1], diff1);
+
+ s2 = vld1q_u16(src16_ptr + j + 16);
+ r2 = vld1q_u16(ref16_ptr + j + 16);
+ p2 = vld1q_u16(pred16_ptr + j + 16);
+ avg2 = vrhaddq_u16(r2, p2);
+ diff2 = vabdq_u16(s2, avg2);
+ sum[2] = vpadalq_u16(sum[2], diff2);
+
+ s3 = vld1q_u16(src16_ptr + j + 24);
+ r3 = vld1q_u16(ref16_ptr + j + 24);
+ p3 = vld1q_u16(pred16_ptr + j + 24);
+ avg3 = vrhaddq_u16(r3, p3);
+ diff3 = vabdq_u16(s3, avg3);
+ sum[3] = vpadalq_u16(sum[3], diff3);
+
+ j += 32;
+ } while (j < w);
+
+ src16_ptr += src_stride;
+ ref16_ptr += ref_stride;
+ pred16_ptr += w;
+ } while (--i != 0);
+
+ sum[0] = vaddq_u32(sum[0], sum[1]);
+ sum[2] = vaddq_u32(sum[2], sum[3]);
+ sum[0] = vaddq_u32(sum[0], sum[2]);
+
+ return horizontal_add_uint32x4(sum[0]);
+}
+
+static INLINE unsigned int highbd_sad64xh_avg_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h,
+ const uint8_t *second_pred) {
+ return highbd_sadwxh_avg_neon(src_ptr, src_stride, ref_ptr, ref_stride, 64, h,
+ second_pred);
+}
+
+static INLINE unsigned int highbd_sad32xh_avg_neon(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *ref_ptr,
+ int ref_stride, int h,
+ const uint8_t *second_pred) {
+ return highbd_sadwxh_avg_neon(src_ptr, src_stride, ref_ptr, ref_stride, 32, h,
+ second_pred);
+}
+
+#define HBD_SAD_WXH_AVG_NEON(w, h) \
+ uint32_t vpx_highbd_sad##w##x##h##_avg_neon( \
+ const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
+ const uint8_t *second_pred) { \
+ return highbd_sad##w##xh_avg_neon(src, src_stride, ref, ref_stride, (h), \
+ second_pred); \
}
+HBD_SAD_WXH_AVG_NEON(4, 4)
+HBD_SAD_WXH_AVG_NEON(4, 8)
+
+HBD_SAD_WXH_AVG_NEON(8, 4)
+HBD_SAD_WXH_AVG_NEON(8, 8)
+HBD_SAD_WXH_AVG_NEON(8, 16)
+
+HBD_SAD_WXH_AVG_NEON(16, 8)
+HBD_SAD_WXH_AVG_NEON(16, 16)
+HBD_SAD_WXH_AVG_NEON(16, 32)
+
+HBD_SAD_WXH_AVG_NEON(32, 16)
+HBD_SAD_WXH_AVG_NEON(32, 32)
+HBD_SAD_WXH_AVG_NEON(32, 64)
+
+HBD_SAD_WXH_AVG_NEON(64, 32)
+HBD_SAD_WXH_AVG_NEON(64, 64)
+
#define highbd_sadMxNx4D(m, n) \
void vpx_highbd_sad##m##x##n##x4d_neon( \
const uint8_t *src_ptr, int src_stride, \
@@ -261,54 +392,41 @@ static VPX_FORCE_INLINE uint32_t highbd_sad8_avg_neon(
/* clang-format off */
// 4x4
-highbd_sad4MxN_avg(4, 4)
highbd_sadMxNx4D(4, 4)
// 4x8
-highbd_sad4MxN_avg(4, 8)
highbd_sadMxNx4D(4, 8)
// 8x4
-highbd_sadMxN_avg(8, 4)
highbd_sadMxNx4D(8, 4)
// 8x8
-highbd_sadMxN_avg(8, 8)
highbd_sadMxNx4D(8, 8)
// 8x16
-highbd_sadMxN_avg(8, 16)
highbd_sadMxNx4D(8, 16)
// 16x8
-highbd_sadMxN_avg(16, 8)
highbd_sadMxNx4D(16, 8)
// 16x16
-highbd_sadMxN_avg(16, 16)
highbd_sadMxNx4D(16, 16)
// 16x32
-highbd_sadMxN_avg(16, 32)
highbd_sadMxNx4D(16, 32)
// 32x16
-highbd_sadMxN_avg(32, 16)
highbd_sadMxNx4D(32, 16)
// 32x32
-highbd_sadMxN_avg(32, 32)
highbd_sadMxNx4D(32, 32)
// 32x64
-highbd_sadMxN_avg(32, 64)
highbd_sadMxNx4D(32, 64)
// 64x32
-highbd_sadMxN_avg(64, 32)
highbd_sadMxNx4D(64, 32)
// 64x64
-highbd_sadMxN_avg(64, 64)
highbd_sadMxNx4D(64, 64)
/* clang-format on */
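For reference, this is what one instantiation of the new macro expands to,
taken directly from the HBD_SAD_WXH_AVG_NEON body above (the 16x16 case is
chosen arbitrarily):

/* HBD_SAD_WXH_AVG_NEON(16, 16) */
uint32_t vpx_highbd_sad16x16_avg_neon(const uint8_t *src, int src_stride,
                                      const uint8_t *ref, int ref_stride,
                                      const uint8_t *second_pred) {
  return highbd_sad16xh_avg_neon(src, src_stride, ref, ref_stride, (16),
                                 second_pred);
}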