author    Jonathan Wright <jonathan.wright@arm.com>  2023-05-30 17:31:18 +0100
committer Jonathan Wright <jonathan.wright@arm.com>  2023-05-31 14:34:43 +0100
commit    c36aa2e9c4a610dd7f5467126c894ac4dcbded02 (patch)
tree      8f3ebabb7de5d4a1eb3af856801ff1b49bb44b94
parent    c738e87f27ef8e12dd28b9052f446a5f69abf3c9 (diff)
Optimize Neon implementation of vpx_int_pro_row
Double the number of accumulator registers to remove the serial
dependency bottleneck in the accumulation loop. Also peel the first
loop iteration so the accumulators are initialized with real data
instead of zero.

Change-Id: I6a90680369f9c33cdfe14ea547ac1569ec3f50de
-rw-r--r--  vpx_dsp/arm/avg_neon.c  |  89
1 file changed, 44 insertions(+), 45 deletions(-)
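Both optimizations generalize beyond Neon. The sketch below is a
hypothetical scalar analogue (the names column_sum, src and n are
illustrative, not from the patch): keeping two independent accumulators
splits what was one serial dependency chain in two, and peeling the
first iteration replaces the zero-initialization of the accumulators
with the first data elements.

#include <assert.h>
#include <stdint.h>

/* Hypothetical scalar analogue of the patch's two optimizations; the
 * names column_sum, src and n are illustrative only. */
static int column_sum(const uint8_t *src, int n) {
  int i, sum0, sum1;
  assert(n >= 2 && n % 2 == 0);
  /* Peeled first iteration: initialize the accumulators from the first
   * two elements rather than zeroing them and adding in the loop. */
  sum0 = src[0];
  sum1 = src[1];
  /* Two independent accumulators: the additions into sum0 and sum1 form
   * separate dependency chains, so they can execute in parallel. */
  for (i = 2; i < n; i += 2) {
    sum0 += src[i];
    sum1 += src[i + 1];
  }
  return sum0 + sum1;
}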
diff --git a/vpx_dsp/arm/avg_neon.c b/vpx_dsp/arm/avg_neon.c
index 2fe65d112..22164242c 100644
--- a/vpx_dsp/arm/avg_neon.c
+++ b/vpx_dsp/arm/avg_neon.c
@@ -70,54 +70,53 @@ int vpx_satd_neon(const tran_low_t *coeff, int length) {
void vpx_int_pro_row_neon(int16_t hbuf[16], uint8_t const *ref,
const int ref_stride, const int height) {
int i;
- uint16x8_t vec_sum_lo = vdupq_n_u16(0);
- uint16x8_t vec_sum_hi = vdupq_n_u16(0);
- const int shift_factor = ((height >> 5) + 3) * -1;
- const int16x8_t vec_shift = vdupq_n_s16(shift_factor);
-
- for (i = 0; i < height; i += 8) {
- const uint8x16_t vec_row1 = vld1q_u8(ref);
- const uint8x16_t vec_row2 = vld1q_u8(ref + ref_stride);
- const uint8x16_t vec_row3 = vld1q_u8(ref + ref_stride * 2);
- const uint8x16_t vec_row4 = vld1q_u8(ref + ref_stride * 3);
- const uint8x16_t vec_row5 = vld1q_u8(ref + ref_stride * 4);
- const uint8x16_t vec_row6 = vld1q_u8(ref + ref_stride * 5);
- const uint8x16_t vec_row7 = vld1q_u8(ref + ref_stride * 6);
- const uint8x16_t vec_row8 = vld1q_u8(ref + ref_stride * 7);
-
- vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row1));
- vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row1));
-
- vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row2));
- vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row2));
-
- vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row3));
- vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row3));
-
- vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row4));
- vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row4));
-
- vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row5));
- vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row5));
-
- vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row6));
- vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row6));
-
- vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row7));
- vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row7));
-
- vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row8));
- vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row8));
-
- ref += ref_stride * 8;
+ uint8x16_t r0, r1, r2, r3;
+ uint16x8_t sum_lo[2], sum_hi[2];
+ uint16x8_t tmp_lo[2], tmp_hi[2];
+ int16x8_t avg_lo, avg_hi;
+
+ const int norm_factor = (height >> 5) + 3;
+ const int16x8_t neg_norm_factor = vdupq_n_s16(-norm_factor);
+
+ assert(height >= 4 && height % 4 == 0);
+
+ r0 = vld1q_u8(ref + 0 * ref_stride);
+ r1 = vld1q_u8(ref + 1 * ref_stride);
+ r2 = vld1q_u8(ref + 2 * ref_stride);
+ r3 = vld1q_u8(ref + 3 * ref_stride);
+
+ sum_lo[0] = vaddl_u8(vget_low_u8(r0), vget_low_u8(r1));
+ sum_hi[0] = vaddl_u8(vget_high_u8(r0), vget_high_u8(r1));
+ sum_lo[1] = vaddl_u8(vget_low_u8(r2), vget_low_u8(r3));
+ sum_hi[1] = vaddl_u8(vget_high_u8(r2), vget_high_u8(r3));
+
+ for (i = 4; i < height; i += 4) {
+ r0 = vld1q_u8(ref + 0 * ref_stride);
+ r1 = vld1q_u8(ref + 1 * ref_stride);
+ r2 = vld1q_u8(ref + 2 * ref_stride);
+ r3 = vld1q_u8(ref + 3 * ref_stride);
+
+ tmp_lo[0] = vaddl_u8(vget_low_u8(r0), vget_low_u8(r1));
+ tmp_hi[0] = vaddl_u8(vget_high_u8(r0), vget_high_u8(r1));
+ tmp_lo[1] = vaddl_u8(vget_low_u8(r2), vget_low_u8(r3));
+ tmp_hi[1] = vaddl_u8(vget_high_u8(r2), vget_high_u8(r3));
+
+ sum_lo[0] = vaddq_u16(sum_lo[0], tmp_lo[0]);
+ sum_hi[0] = vaddq_u16(sum_hi[0], tmp_hi[0]);
+ sum_lo[1] = vaddq_u16(sum_lo[1], tmp_lo[1]);
+ sum_hi[1] = vaddq_u16(sum_hi[1], tmp_hi[1]);
+
+ ref += 4 * ref_stride;
}
- vec_sum_lo = vshlq_u16(vec_sum_lo, vec_shift);
- vec_sum_hi = vshlq_u16(vec_sum_hi, vec_shift);
+ sum_lo[0] = vaddq_u16(sum_lo[0], sum_lo[1]);
+ sum_hi[0] = vaddq_u16(sum_hi[0], sum_hi[1]);
+
+ avg_lo = vshlq_s16(vreinterpretq_s16_u16(sum_lo[0]), neg_norm_factor);
+ avg_hi = vshlq_s16(vreinterpretq_s16_u16(sum_hi[0]), neg_norm_factor);
- vst1q_s16(hbuf, vreinterpretq_s16_u16(vec_sum_lo));
- hbuf += 8;
- vst1q_s16(hbuf, vreinterpretq_s16_u16(vec_sum_hi));
+ vst1q_s16(hbuf, avg_lo);
+ vst1q_s16(hbuf + 8, avg_hi);
}
int16_t vpx_int_pro_col_neon(uint8_t const *ref, const int width) {
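For reference, the arithmetic that vpx_int_pro_row_neon implements can
be written as the scalar sketch below (illustrative only; this is not
the library's C reference implementation). Each of the 16 outputs is a
column sum over height rows, arithmetically shifted right by
(height >> 5) + 3, i.e. by 3 for height 16, 4 for height 32 and 5 for
height 64; this is what the vshlq_s16 by -norm_factor performs above.

#include <stdint.h>

/* Scalar sketch of the computation performed by vpx_int_pro_row_neon
 * above (illustrative; not the library's C reference implementation). */
static void int_pro_row_scalar(int16_t hbuf[16], const uint8_t *ref,
                               int ref_stride, int height) {
  const int norm_factor = (height >> 5) + 3;
  int c, r;
  for (c = 0; c < 16; ++c) {
    int sum = 0;
    for (r = 0; r < height; ++r) sum += ref[r * ref_stride + c];
    /* Equivalent to the vshlq_s16 by -norm_factor in the Neon code. */
    hbuf[c] = (int16_t)(sum >> norm_factor);
  }
}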