Diffstat (limited to 'vpx_dsp/arm')
-rw-r--r--  vpx_dsp/arm/loopfilter_mb_neon.asm      16
-rw-r--r--  vpx_dsp/arm/loopfilter_mb_neon.c          7
-rw-r--r--  vpx_dsp/arm/transpose_neon.h             62
-rw-r--r--  vpx_dsp/arm/vpx_convolve8_avg_neon.c    363
-rw-r--r--  vpx_dsp/arm/vpx_convolve8_neon.c       1270
-rw-r--r--  vpx_dsp/arm/vpx_convolve_avg_neon.c     185
-rw-r--r--  vpx_dsp/arm/vpx_convolve_copy_neon.c    102
-rw-r--r--  vpx_dsp/arm/vpx_convolve_neon.c          10
8 files changed, 1219 insertions(+), 796 deletions(-)
diff --git a/vpx_dsp/arm/loopfilter_mb_neon.asm b/vpx_dsp/arm/loopfilter_mb_neon.asm
index 5279ecfb7..730c40de0 100644
--- a/vpx_dsp/arm/loopfilter_mb_neon.asm
+++ b/vpx_dsp/arm/loopfilter_mb_neon.asm
@@ -8,8 +8,8 @@
; be found in the AUTHORS file in the root of the source tree.
;
- EXPORT |vpx_lpf_horizontal_edge_8_neon|
- EXPORT |vpx_lpf_horizontal_edge_16_neon|
+ EXPORT |vpx_lpf_horizontal_16_neon|
+ EXPORT |vpx_lpf_horizontal_16_dual_neon|
EXPORT |vpx_lpf_vertical_16_neon|
EXPORT |vpx_lpf_vertical_16_dual_neon|
ARM
@@ -119,7 +119,7 @@ h_next
ENDP ; |mb_lpf_horizontal_edge|
-; void vpx_lpf_horizontal_edge_8_neon(uint8_t *s, int pitch,
+; void vpx_lpf_horizontal_16_neon(uint8_t *s, int pitch,
; const uint8_t *blimit,
; const uint8_t *limit,
; const uint8_t *thresh)
@@ -128,12 +128,12 @@ h_next
; r2 const uint8_t *blimit,
; r3 const uint8_t *limit,
; sp const uint8_t *thresh
-|vpx_lpf_horizontal_edge_8_neon| PROC
+|vpx_lpf_horizontal_16_neon| PROC
mov r12, #1
b mb_lpf_horizontal_edge
- ENDP ; |vpx_lpf_horizontal_edge_8_neon|
+ ENDP ; |vpx_lpf_horizontal_16_neon|
-; void vpx_lpf_horizontal_edge_16_neon(uint8_t *s, int pitch,
+; void vpx_lpf_horizontal_16_dual_neon(uint8_t *s, int pitch,
; const uint8_t *blimit,
; const uint8_t *limit,
; const uint8_t *thresh)
@@ -142,10 +142,10 @@ h_next
; r2 const uint8_t *blimit,
; r3 const uint8_t *limit,
; sp const uint8_t *thresh
-|vpx_lpf_horizontal_edge_16_neon| PROC
+|vpx_lpf_horizontal_16_dual_neon| PROC
mov r12, #2
b mb_lpf_horizontal_edge
- ENDP ; |vpx_lpf_horizontal_edge_16_neon|
+ ENDP ; |vpx_lpf_horizontal_16_dual_neon|
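Both renamed entry points funnel into the shared kernel mb_lpf_horizontal_edge, with r12 carrying the pass count: 1 filters a single 8-pixel-wide edge, 2 filters two adjacent ones (a 16-pixel-wide edge). A minimal C sketch of that dispatch, with illustrative names rather than the library's C API:

    /* Sketch only: count plays the role of r12 in the assembly above. */
    static void mb_lpf_horizontal_edge(uint8_t *s, int pitch,
                                       const uint8_t *blimit,
                                       const uint8_t *limit,
                                       const uint8_t *thresh, int count);

    void vpx_lpf_horizontal_16(uint8_t *s, int pitch, const uint8_t *blimit,
                               const uint8_t *limit, const uint8_t *thresh) {
      mb_lpf_horizontal_edge(s, pitch, blimit, limit, thresh, 1);
    }

    void vpx_lpf_horizontal_16_dual(uint8_t *s, int pitch,
                                    const uint8_t *blimit,
                                    const uint8_t *limit,
                                    const uint8_t *thresh) {
      mb_lpf_horizontal_edge(s, pitch, blimit, limit, thresh, 2);
    }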
; void mb_lpf_vertical_edge_w(uint8_t *s, int p, const uint8_t *blimit,
; const uint8_t *limit, const uint8_t *thresh,
diff --git a/vpx_dsp/arm/loopfilter_mb_neon.c b/vpx_dsp/arm/loopfilter_mb_neon.c
index f95267472..fc080163b 100644
--- a/vpx_dsp/arm/loopfilter_mb_neon.c
+++ b/vpx_dsp/arm/loopfilter_mb_neon.c
@@ -975,9 +975,8 @@ FUN_LPF_16_KERNEL(_, 8) // lpf_16_kernel
FUN_LPF_16_KERNEL(_dual_, 16) // lpf_16_dual_kernel
#undef FUN_LPF_16_KERNEL
-void vpx_lpf_horizontal_edge_8_neon(uint8_t *s, int p, const uint8_t *blimit,
- const uint8_t *limit,
- const uint8_t *thresh) {
+void vpx_lpf_horizontal_16_neon(uint8_t *s, int p, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh) {
uint8x8_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7, op6,
op5, op4, op3, op2, op1, op0, oq0, oq1, oq2, oq3, oq4, oq5, oq6;
uint32_t flat_status, flat2_status;
@@ -992,7 +991,7 @@ void vpx_lpf_horizontal_edge_8_neon(uint8_t *s, int p, const uint8_t *blimit,
oq5, oq6, flat_status, flat2_status);
}
-void vpx_lpf_horizontal_edge_16_neon(uint8_t *s, int p, const uint8_t *blimit,
+void vpx_lpf_horizontal_16_dual_neon(uint8_t *s, int p, const uint8_t *blimit,
const uint8_t *limit,
const uint8_t *thresh) {
uint8x16_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7,
diff --git a/vpx_dsp/arm/transpose_neon.h b/vpx_dsp/arm/transpose_neon.h
index 3d0b41f93..55188c5bc 100644
--- a/vpx_dsp/arm/transpose_neon.h
+++ b/vpx_dsp/arm/transpose_neon.h
@@ -39,6 +39,68 @@ static INLINE uint8x16x2_t vpx_vtrnq_u64(uint32x4_t a0, uint32x4_t a1) {
return b0;
}
+static INLINE void transpose_u8_4x4(uint8x8_t *a0, uint8x8_t *a1) {
+ // Swap 16 bit elements. Goes from:
+ // a0: 00 01 02 03 10 11 12 13
+ // a1: 20 21 22 23 30 31 32 33
+ // to:
+ // b0.val[0]: 00 01 20 21 10 11 30 31
+ // b0.val[1]: 02 03 22 23 12 13 32 33
+
+ const uint16x4x2_t b0 =
+ vtrn_u16(vreinterpret_u16_u8(*a0), vreinterpret_u16_u8(*a1));
+
+ // Swap 32 bit elements resulting in:
+ // c0.val[0]: 00 01 20 21 02 03 22 23
+ // c0.val[1]: 10 11 30 31 12 13 32 33
+
+ const uint32x2x2_t c0 = vtrn_u32(vreinterpret_u32_u16(b0.val[0]),
+ vreinterpret_u32_u16(b0.val[1]));
+
+ // Swap 8 bit elements resulting in:
+ // d0.val[0]: 00 10 20 30 02 12 22 32
+ // d0.val[1]: 01 11 21 31 03 13 23 33
+
+ const uint8x8x2_t d0 =
+ vtrn_u8(vreinterpret_u8_u32(c0.val[0]), vreinterpret_u8_u32(c0.val[1]));
+
+ *a0 = d0.val[0];
+ *a1 = d0.val[1];
+}
+
+static INLINE void transpose_u8_8x4(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2,
+ uint8x8_t *a3) {
+ // Swap 8 bit elements. Goes from:
+ // a0: 00 01 02 03 04 05 06 07
+ // a1: 10 11 12 13 14 15 16 17
+ // a2: 20 21 22 23 24 25 26 27
+ // a3: 30 31 32 33 34 35 36 37
+ // to:
+ // b0.val[0]: 00 10 02 12 04 14 06 16
+ // b0.val[1]: 01 11 03 13 05 15 07 17
+ // b1.val[0]: 20 30 22 32 24 34 26 36
+ // b1.val[1]: 21 31 23 33 25 35 27 37
+
+ const uint8x8x2_t b0 = vtrn_u8(*a0, *a1);
+ const uint8x8x2_t b1 = vtrn_u8(*a2, *a3);
+
+ // Swap 16 bit elements resulting in:
+ // c0.val[0]: 00 10 20 30 04 14 24 34
+ // c0.val[1]: 02 12 22 32 06 16 26 36
+ // c1.val[0]: 01 11 21 31 05 15 25 35
+ // c1.val[1]: 03 13 23 33 07 17 27 37
+
+ const uint16x4x2_t c0 =
+ vtrn_u16(vreinterpret_u16_u8(b0.val[0]), vreinterpret_u16_u8(b1.val[0]));
+ const uint16x4x2_t c1 =
+ vtrn_u16(vreinterpret_u16_u8(b0.val[1]), vreinterpret_u16_u8(b1.val[1]));
+
+ *a0 = vreinterpret_u8_u16(c0.val[0]);
+ *a1 = vreinterpret_u8_u16(c1.val[0]);
+ *a2 = vreinterpret_u8_u16(c0.val[1]);
+ *a3 = vreinterpret_u8_u16(c1.val[1]);
+}
+
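A usage sketch for the new 4x4 transpose (illustrative, not part of the patch): each uint8x8_t packs two rows of a contiguous row-major 4x4 tile, so two 8-byte loads cover the whole tile.

    #include <arm_neon.h>

    uint8_t tile[16];  /* row-major 4x4 block: tile[r * 4 + c] */
    uint8x8_t a0 = vld1_u8(tile);      /* rows 0 and 1 */
    uint8x8_t a1 = vld1_u8(tile + 8);  /* rows 2 and 3 */
    transpose_u8_4x4(&a0, &a1);
    /* a0 now holds columns 0 and 2, a1 holds columns 1 and 3, matching the
     * output layout documented in the comments above. */

The convolve code below relies on exactly this: it transposes the input so the horizontal filter can run as a vertical one, then transposes back before the store.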
// Note: Using 'd' registers or 'q' registers has almost identical speed. We use
// 'q' registers here to save some instructions.
static INLINE void transpose_u8_8x8(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2,
diff --git a/vpx_dsp/arm/vpx_convolve8_avg_neon.c b/vpx_dsp/arm/vpx_convolve8_avg_neon.c
deleted file mode 100644
index 8e5373be0..000000000
--- a/vpx_dsp/arm/vpx_convolve8_avg_neon.c
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-#include <assert.h>
-
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "vpx/vpx_integer.h"
-#include "vpx_ports/mem.h"
-
-static INLINE int32x4_t MULTIPLY_BY_Q0(int16x4_t dsrc0, int16x4_t dsrc1,
- int16x4_t dsrc2, int16x4_t dsrc3,
- int16x4_t dsrc4, int16x4_t dsrc5,
- int16x4_t dsrc6, int16x4_t dsrc7,
- int16x8_t q0s16) {
- int32x4_t qdst;
- int16x4_t d0s16, d1s16;
-
- d0s16 = vget_low_s16(q0s16);
- d1s16 = vget_high_s16(q0s16);
-
- qdst = vmull_lane_s16(dsrc0, d0s16, 0);
- qdst = vmlal_lane_s16(qdst, dsrc1, d0s16, 1);
- qdst = vmlal_lane_s16(qdst, dsrc2, d0s16, 2);
- qdst = vmlal_lane_s16(qdst, dsrc3, d0s16, 3);
- qdst = vmlal_lane_s16(qdst, dsrc4, d1s16, 0);
- qdst = vmlal_lane_s16(qdst, dsrc5, d1s16, 1);
- qdst = vmlal_lane_s16(qdst, dsrc6, d1s16, 2);
- qdst = vmlal_lane_s16(qdst, dsrc7, d1s16, 3);
- return qdst;
-}
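The deleted helper was an 8-tap multiply-accumulate over four output positions at once: with filter taps f[0..7] held in q0s16,

    qdst[i] = f[0]*dsrc0[i] + f[1]*dsrc1[i] + ... + f[7]*dsrc7[i],  i = 0..3,

widened to 32 bits by vmull/vmlal. Callers then narrowed with vqrshrun_n_s32(qdst, 7), i.e. out = clip(round(qdst / 128)), since the taps sum to 128. Its replacement in vpx_convolve8_neon.c (convolve8_4/convolve8_8 below) computes the same dot product in 16-bit arithmetic.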
-
-void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
- uint8_t *dst, ptrdiff_t dst_stride,
- const int16_t *filter_x, int x_step_q4,
- const int16_t *filter_y, // unused
- int y_step_q4, // unused
- int w, int h) {
- int width;
- const uint8_t *s;
- uint8_t *d;
- uint8x8_t d2u8, d3u8, d24u8, d25u8, d26u8, d27u8, d28u8, d29u8;
- uint32x2_t d2u32, d3u32, d6u32, d7u32, d28u32, d29u32, d30u32, d31u32;
- uint8x16_t q1u8, q3u8, q12u8, q13u8, q14u8, q15u8;
- int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d22s16, d23s16;
- int16x4_t d24s16, d25s16, d26s16, d27s16;
- uint16x4_t d2u16, d3u16, d4u16, d5u16, d16u16, d17u16, d18u16, d19u16;
- int16x8_t q0s16;
- uint16x8_t q1u16, q2u16, q8u16, q9u16, q10u16, q11u16, q12u16, q13u16;
- int32x4_t q1s32, q2s32, q14s32, q15s32;
- uint16x8x2_t q0x2u16;
- uint8x8x2_t d0x2u8, d1x2u8;
- uint32x2x2_t d0x2u32;
- uint16x4x2_t d0x2u16, d1x2u16;
- uint32x4x2_t q0x2u32;
-
- assert(x_step_q4 == 16);
-
- (void)x_step_q4;
- (void)y_step_q4;
- (void)filter_y;
-
- q0s16 = vld1q_s16(filter_x);
-
- src -= 3; // adjust for taps
- for (; h > 0; h -= 4) { // loop_horiz_v
- s = src;
- d24u8 = vld1_u8(s);
- s += src_stride;
- d25u8 = vld1_u8(s);
- s += src_stride;
- d26u8 = vld1_u8(s);
- s += src_stride;
- d27u8 = vld1_u8(s);
-
- q12u8 = vcombine_u8(d24u8, d25u8);
- q13u8 = vcombine_u8(d26u8, d27u8);
-
- q0x2u16 =
- vtrnq_u16(vreinterpretq_u16_u8(q12u8), vreinterpretq_u16_u8(q13u8));
- d24u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[0]));
- d25u8 = vreinterpret_u8_u16(vget_high_u16(q0x2u16.val[0]));
- d26u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[1]));
- d27u8 = vreinterpret_u8_u16(vget_high_u16(q0x2u16.val[1]));
- d0x2u8 = vtrn_u8(d24u8, d25u8);
- d1x2u8 = vtrn_u8(d26u8, d27u8);
-
- __builtin_prefetch(src + src_stride * 4);
- __builtin_prefetch(src + src_stride * 5);
-
- q8u16 = vmovl_u8(d0x2u8.val[0]);
- q9u16 = vmovl_u8(d0x2u8.val[1]);
- q10u16 = vmovl_u8(d1x2u8.val[0]);
- q11u16 = vmovl_u8(d1x2u8.val[1]);
-
- src += 7;
- d16u16 = vget_low_u16(q8u16);
- d17u16 = vget_high_u16(q8u16);
- d18u16 = vget_low_u16(q9u16);
- d19u16 = vget_high_u16(q9u16);
- q8u16 = vcombine_u16(d16u16, d18u16); // vswp 17 18
- q9u16 = vcombine_u16(d17u16, d19u16);
-
- d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16));
- d23s16 = vreinterpret_s16_u16(vget_high_u16(q10u16)); // vmov 23 21
- for (width = w; width > 0; width -= 4, src += 4, dst += 4) { // loop_horiz
- s = src;
- d28u32 = vld1_dup_u32((const uint32_t *)s);
- s += src_stride;
- d29u32 = vld1_dup_u32((const uint32_t *)s);
- s += src_stride;
- d31u32 = vld1_dup_u32((const uint32_t *)s);
- s += src_stride;
- d30u32 = vld1_dup_u32((const uint32_t *)s);
-
- __builtin_prefetch(src + 64);
-
- d0x2u16 =
- vtrn_u16(vreinterpret_u16_u32(d28u32), vreinterpret_u16_u32(d31u32));
- d1x2u16 =
- vtrn_u16(vreinterpret_u16_u32(d29u32), vreinterpret_u16_u32(d30u32));
- d0x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[0]), // d28
- vreinterpret_u8_u16(d1x2u16.val[0])); // d29
- d1x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[1]), // d31
- vreinterpret_u8_u16(d1x2u16.val[1])); // d30
-
- __builtin_prefetch(src + 64 + src_stride);
-
- q14u8 = vcombine_u8(d0x2u8.val[0], d0x2u8.val[1]);
- q15u8 = vcombine_u8(d1x2u8.val[1], d1x2u8.val[0]);
- q0x2u32 =
- vtrnq_u32(vreinterpretq_u32_u8(q14u8), vreinterpretq_u32_u8(q15u8));
-
- d28u8 = vreinterpret_u8_u32(vget_low_u32(q0x2u32.val[0]));
- d29u8 = vreinterpret_u8_u32(vget_high_u32(q0x2u32.val[0]));
- q12u16 = vmovl_u8(d28u8);
- q13u16 = vmovl_u8(d29u8);
-
- __builtin_prefetch(src + 64 + src_stride * 2);
-
- d = dst;
- d6u32 = vld1_lane_u32((const uint32_t *)d, d6u32, 0);
- d += dst_stride;
- d7u32 = vld1_lane_u32((const uint32_t *)d, d7u32, 0);
- d += dst_stride;
- d6u32 = vld1_lane_u32((const uint32_t *)d, d6u32, 1);
- d += dst_stride;
- d7u32 = vld1_lane_u32((const uint32_t *)d, d7u32, 1);
-
- d16s16 = vreinterpret_s16_u16(vget_low_u16(q8u16));
- d17s16 = vreinterpret_s16_u16(vget_high_u16(q8u16));
- d18s16 = vreinterpret_s16_u16(vget_low_u16(q9u16));
- d19s16 = vreinterpret_s16_u16(vget_high_u16(q9u16));
- d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
- d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
- d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
- d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
- d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-
- q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d20s16, d22s16, d18s16, d19s16,
- d23s16, d24s16, q0s16);
- q2s32 = MULTIPLY_BY_Q0(d17s16, d20s16, d22s16, d18s16, d19s16, d23s16,
- d24s16, d26s16, q0s16);
- q14s32 = MULTIPLY_BY_Q0(d20s16, d22s16, d18s16, d19s16, d23s16, d24s16,
- d26s16, d27s16, q0s16);
- q15s32 = MULTIPLY_BY_Q0(d22s16, d18s16, d19s16, d23s16, d24s16, d26s16,
- d27s16, d25s16, q0s16);
-
- __builtin_prefetch(src + 64 + src_stride * 3);
-
- d2u16 = vqrshrun_n_s32(q1s32, 7);
- d3u16 = vqrshrun_n_s32(q2s32, 7);
- d4u16 = vqrshrun_n_s32(q14s32, 7);
- d5u16 = vqrshrun_n_s32(q15s32, 7);
-
- q1u16 = vcombine_u16(d2u16, d3u16);
- q2u16 = vcombine_u16(d4u16, d5u16);
-
- d2u8 = vqmovn_u16(q1u16);
- d3u8 = vqmovn_u16(q2u16);
-
- d0x2u16 = vtrn_u16(vreinterpret_u16_u8(d2u8), vreinterpret_u16_u8(d3u8));
- d0x2u32 = vtrn_u32(vreinterpret_u32_u16(d0x2u16.val[0]),
- vreinterpret_u32_u16(d0x2u16.val[1]));
- d0x2u8 = vtrn_u8(vreinterpret_u8_u32(d0x2u32.val[0]),
- vreinterpret_u8_u32(d0x2u32.val[1]));
-
- q1u8 = vcombine_u8(d0x2u8.val[0], d0x2u8.val[1]);
- q3u8 = vreinterpretq_u8_u32(vcombine_u32(d6u32, d7u32));
-
- q1u8 = vrhaddq_u8(q1u8, q3u8);
-
- d2u32 = vreinterpret_u32_u8(vget_low_u8(q1u8));
- d3u32 = vreinterpret_u32_u8(vget_high_u8(q1u8));
-
- d = dst;
- vst1_lane_u32((uint32_t *)d, d2u32, 0);
- d += dst_stride;
- vst1_lane_u32((uint32_t *)d, d3u32, 0);
- d += dst_stride;
- vst1_lane_u32((uint32_t *)d, d2u32, 1);
- d += dst_stride;
- vst1_lane_u32((uint32_t *)d, d3u32, 1);
-
- q8u16 = q9u16;
- d20s16 = d23s16;
- q11u16 = q12u16;
- q9u16 = q13u16;
- d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
- }
- src += src_stride * 4 - w - 7;
- dst += dst_stride * 4 - w;
- }
- return;
-}
-
-void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
- uint8_t *dst, ptrdiff_t dst_stride,
- const int16_t *filter_x, // unused
- int x_step_q4, // unused
- const int16_t *filter_y, int y_step_q4, int w,
- int h) {
- int height;
- const uint8_t *s;
- uint8_t *d;
- uint8x8_t d2u8, d3u8;
- uint32x2_t d2u32, d3u32, d6u32, d7u32;
- uint32x2_t d16u32, d18u32, d20u32, d22u32, d24u32, d26u32;
- uint8x16_t q1u8, q3u8;
- int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16;
- int16x4_t d24s16, d25s16, d26s16, d27s16;
- uint16x4_t d2u16, d3u16, d4u16, d5u16;
- int16x8_t q0s16;
- uint16x8_t q1u16, q2u16, q8u16, q9u16, q10u16, q11u16, q12u16, q13u16;
- int32x4_t q1s32, q2s32, q14s32, q15s32;
-
- assert(y_step_q4 == 16);
-
- (void)x_step_q4;
- (void)y_step_q4;
- (void)filter_x;
-
- src -= src_stride * 3;
- q0s16 = vld1q_s16(filter_y);
- for (; w > 0; w -= 4, src += 4, dst += 4) { // loop_vert_h
- s = src;
- d16u32 = vld1_lane_u32((const uint32_t *)s, d16u32, 0);
- s += src_stride;
- d16u32 = vld1_lane_u32((const uint32_t *)s, d16u32, 1);
- s += src_stride;
- d18u32 = vld1_lane_u32((const uint32_t *)s, d18u32, 0);
- s += src_stride;
- d18u32 = vld1_lane_u32((const uint32_t *)s, d18u32, 1);
- s += src_stride;
- d20u32 = vld1_lane_u32((const uint32_t *)s, d20u32, 0);
- s += src_stride;
- d20u32 = vld1_lane_u32((const uint32_t *)s, d20u32, 1);
- s += src_stride;
- d22u32 = vld1_lane_u32((const uint32_t *)s, d22u32, 0);
- s += src_stride;
-
- q8u16 = vmovl_u8(vreinterpret_u8_u32(d16u32));
- q9u16 = vmovl_u8(vreinterpret_u8_u32(d18u32));
- q10u16 = vmovl_u8(vreinterpret_u8_u32(d20u32));
- q11u16 = vmovl_u8(vreinterpret_u8_u32(d22u32));
-
- d18s16 = vreinterpret_s16_u16(vget_low_u16(q9u16));
- d19s16 = vreinterpret_s16_u16(vget_high_u16(q9u16));
- d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
- d = dst;
- for (height = h; height > 0; height -= 4) { // loop_vert
- d24u32 = vld1_lane_u32((const uint32_t *)s, d24u32, 0);
- s += src_stride;
- d26u32 = vld1_lane_u32((const uint32_t *)s, d26u32, 0);
- s += src_stride;
- d26u32 = vld1_lane_u32((const uint32_t *)s, d26u32, 1);
- s += src_stride;
- d24u32 = vld1_lane_u32((const uint32_t *)s, d24u32, 1);
- s += src_stride;
-
- q12u16 = vmovl_u8(vreinterpret_u8_u32(d24u32));
- q13u16 = vmovl_u8(vreinterpret_u8_u32(d26u32));
-
- d6u32 = vld1_lane_u32((const uint32_t *)d, d6u32, 0);
- d += dst_stride;
- d6u32 = vld1_lane_u32((const uint32_t *)d, d6u32, 1);
- d += dst_stride;
- d7u32 = vld1_lane_u32((const uint32_t *)d, d7u32, 0);
- d += dst_stride;
- d7u32 = vld1_lane_u32((const uint32_t *)d, d7u32, 1);
- d -= dst_stride * 3;
-
- d16s16 = vreinterpret_s16_u16(vget_low_u16(q8u16));
- d17s16 = vreinterpret_s16_u16(vget_high_u16(q8u16));
- d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16));
- d21s16 = vreinterpret_s16_u16(vget_high_u16(q10u16));
- d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
- d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
- d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
- d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-
- __builtin_prefetch(s);
- __builtin_prefetch(s + src_stride);
- q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d18s16, d19s16, d20s16, d21s16,
- d22s16, d24s16, q0s16);
- __builtin_prefetch(s + src_stride * 2);
- __builtin_prefetch(s + src_stride * 3);
- q2s32 = MULTIPLY_BY_Q0(d17s16, d18s16, d19s16, d20s16, d21s16, d22s16,
- d24s16, d26s16, q0s16);
- __builtin_prefetch(d);
- __builtin_prefetch(d + dst_stride);
- q14s32 = MULTIPLY_BY_Q0(d18s16, d19s16, d20s16, d21s16, d22s16, d24s16,
- d26s16, d27s16, q0s16);
- __builtin_prefetch(d + dst_stride * 2);
- __builtin_prefetch(d + dst_stride * 3);
- q15s32 = MULTIPLY_BY_Q0(d19s16, d20s16, d21s16, d22s16, d24s16, d26s16,
- d27s16, d25s16, q0s16);
-
- d2u16 = vqrshrun_n_s32(q1s32, 7);
- d3u16 = vqrshrun_n_s32(q2s32, 7);
- d4u16 = vqrshrun_n_s32(q14s32, 7);
- d5u16 = vqrshrun_n_s32(q15s32, 7);
-
- q1u16 = vcombine_u16(d2u16, d3u16);
- q2u16 = vcombine_u16(d4u16, d5u16);
-
- d2u8 = vqmovn_u16(q1u16);
- d3u8 = vqmovn_u16(q2u16);
-
- q1u8 = vcombine_u8(d2u8, d3u8);
- q3u8 = vreinterpretq_u8_u32(vcombine_u32(d6u32, d7u32));
-
- q1u8 = vrhaddq_u8(q1u8, q3u8);
-
- d2u32 = vreinterpret_u32_u8(vget_low_u8(q1u8));
- d3u32 = vreinterpret_u32_u8(vget_high_u8(q1u8));
-
- vst1_lane_u32((uint32_t *)d, d2u32, 0);
- d += dst_stride;
- vst1_lane_u32((uint32_t *)d, d2u32, 1);
- d += dst_stride;
- vst1_lane_u32((uint32_t *)d, d3u32, 0);
- d += dst_stride;
- vst1_lane_u32((uint32_t *)d, d3u32, 1);
- d += dst_stride;
-
- q8u16 = q10u16;
- d18s16 = d22s16;
- d19s16 = d24s16;
- q10u16 = q13u16;
- d22s16 = d25s16;
- }
- }
- return;
-}
diff --git a/vpx_dsp/arm/vpx_convolve8_neon.c b/vpx_dsp/arm/vpx_convolve8_neon.c
index 951c425e2..01fa67acf 100644
--- a/vpx_dsp/arm/vpx_convolve8_neon.c
+++ b/vpx_dsp/arm/vpx_convolve8_neon.c
@@ -14,28 +14,113 @@
#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
+#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_ports/mem.h"
-static INLINE int32x4_t MULTIPLY_BY_Q0(int16x4_t dsrc0, int16x4_t dsrc1,
- int16x4_t dsrc2, int16x4_t dsrc3,
- int16x4_t dsrc4, int16x4_t dsrc5,
- int16x4_t dsrc6, int16x4_t dsrc7,
- int16x8_t q0s16) {
- int32x4_t qdst;
- int16x4_t d0s16, d1s16;
-
- d0s16 = vget_low_s16(q0s16);
- d1s16 = vget_high_s16(q0s16);
-
- qdst = vmull_lane_s16(dsrc0, d0s16, 0);
- qdst = vmlal_lane_s16(qdst, dsrc1, d0s16, 1);
- qdst = vmlal_lane_s16(qdst, dsrc2, d0s16, 2);
- qdst = vmlal_lane_s16(qdst, dsrc3, d0s16, 3);
- qdst = vmlal_lane_s16(qdst, dsrc4, d1s16, 0);
- qdst = vmlal_lane_s16(qdst, dsrc5, d1s16, 1);
- qdst = vmlal_lane_s16(qdst, dsrc6, d1s16, 2);
- qdst = vmlal_lane_s16(qdst, dsrc7, d1s16, 3);
- return qdst;
+// Note:
+// 1. src is not always 32-bit aligned, so don't call vld1_lane_u32(src).
+// 2. After refactoring the shared code in the kernel loops into inline
+// functions, decoder speed dropped significantly when compiled with gcc, so
+// those parts are left unrefactored for now.
+// 3. For horizontal convolve, there is an alternative optimization that
+// convolves a single row per loop iteration. For each row, 8 sample banks of
+// 4 or 8 samples each are read from memory: src, (src+1), (src+2), (src+3),
+// (src+4), (src+5), (src+6), (src+7), or prepared with vector extract
+// instructions. This is much faster in the speed unit test, but slowed down
+// the whole decoder by 5%.
+
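Note 1 in practice (an illustrative sketch, not patch code): casting an arbitrary byte pointer to const uint32_t * for vld1_lane_u32() is undefined behavior when src is not 4-byte aligned, and the cast lets the compiler assume alignment. Byte loads carry no alignment requirement, so the helpers below use vld1_u8() and reinterpret instead:

    /* Unsafe when src may be unaligned:
     *   v = vld1_lane_u32((const uint32_t *)src, v, 0);
     */
    uint8x8_t t = vld1_u8(src);           /* byte load, no alignment assumed */
    uint32x2_t v = vreinterpret_u32_u8(t);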
+static INLINE void load_8x4(const uint8_t *s, const int p, uint8x8_t *s0,
+ uint8x8_t *s1, uint8x8_t *s2, uint8x8_t *s3) {
+ *s0 = vld1_u8(s);
+ s += p;
+ *s1 = vld1_u8(s);
+ s += p;
+ *s2 = vld1_u8(s);
+ s += p;
+ *s3 = vld1_u8(s);
+}
+
+static INLINE void load_8x8(const uint8_t *s, const int p, uint8x8_t *s0,
+ uint8x8_t *s1, uint8x8_t *s2, uint8x8_t *s3,
+ uint8x8_t *s4, uint8x8_t *s5, uint8x8_t *s6,
+ uint8x8_t *s7) {
+ *s0 = vld1_u8(s);
+ s += p;
+ *s1 = vld1_u8(s);
+ s += p;
+ *s2 = vld1_u8(s);
+ s += p;
+ *s3 = vld1_u8(s);
+ s += p;
+ *s4 = vld1_u8(s);
+ s += p;
+ *s5 = vld1_u8(s);
+ s += p;
+ *s6 = vld1_u8(s);
+ s += p;
+ *s7 = vld1_u8(s);
+}
+
+static INLINE void store_8x8(uint8_t *s, const int p, const uint8x8_t s0,
+ const uint8x8_t s1, const uint8x8_t s2,
+ const uint8x8_t s3, const uint8x8_t s4,
+ const uint8x8_t s5, const uint8x8_t s6,
+ const uint8x8_t s7) {
+ vst1_u8(s, s0);
+ s += p;
+ vst1_u8(s, s1);
+ s += p;
+ vst1_u8(s, s2);
+ s += p;
+ vst1_u8(s, s3);
+ s += p;
+ vst1_u8(s, s4);
+ s += p;
+ vst1_u8(s, s5);
+ s += p;
+ vst1_u8(s, s6);
+ s += p;
+ vst1_u8(s, s7);
+}
+
+static INLINE int16x4_t convolve8_4(int16x4_t s0, int16x4_t s1, int16x4_t s2,
+ int16x4_t s3, int16x4_t s4, int16x4_t s5,
+ int16x4_t s6, int16x4_t s7,
+ int16x8_t filters, int16x4_t filter3,
+ int16x4_t filter4) {
+ const int16x4_t filters_lo = vget_low_s16(filters);
+ const int16x4_t filters_hi = vget_high_s16(filters);
+ int16x4_t sum = vdup_n_s16(0);
+
+ sum = vmla_lane_s16(sum, s0, filters_lo, 0);
+ sum = vmla_lane_s16(sum, s1, filters_lo, 1);
+ sum = vmla_lane_s16(sum, s2, filters_lo, 2);
+ sum = vmla_lane_s16(sum, s5, filters_hi, 1);
+ sum = vmla_lane_s16(sum, s6, filters_hi, 2);
+ sum = vmla_lane_s16(sum, s7, filters_hi, 3);
+ sum = vqadd_s16(sum, vmul_s16(s3, filter3));
+ sum = vqadd_s16(sum, vmul_s16(s4, filter4));
+ return sum;
+}
+
+static INLINE int16x8_t convolve8_8(int16x8_t s0, int16x8_t s1, int16x8_t s2,
+ int16x8_t s3, int16x8_t s4, int16x8_t s5,
+ int16x8_t s6, int16x8_t s7,
+ int16x8_t filters, int16x8_t filter3,
+ int16x8_t filter4) {
+ const int16x4_t filters_lo = vget_low_s16(filters);
+ const int16x4_t filters_hi = vget_high_s16(filters);
+ int16x8_t sum = vdupq_n_s16(0);
+
+ sum = vmlaq_lane_s16(sum, s0, filters_lo, 0);
+ sum = vmlaq_lane_s16(sum, s1, filters_lo, 1);
+ sum = vmlaq_lane_s16(sum, s2, filters_lo, 2);
+ sum = vmlaq_lane_s16(sum, s5, filters_hi, 1);
+ sum = vmlaq_lane_s16(sum, s6, filters_hi, 2);
+ sum = vmlaq_lane_s16(sum, s7, filters_hi, 3);
+ sum = vqaddq_s16(sum, vmulq_s16(s3, filter3));
+ sum = vqaddq_s16(sum, vmulq_s16(s4, filter4));
+ return sum;
}
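Both kernels evaluate the same 8-tap FIR that MULTIPLY_BY_Q0 did, but in 16 bits; after the caller's vqrshrun_n_s16(sum, 7) the end-to-end result is

    out[i] = clip_0_255(round((f[0]*s0[i] + f[1]*s1[i] + ... + f[7]*s7[i]) / 128)).

The two center taps are split out and accumulated with vqadd/vmul, presumably because they are the largest coefficients and a plain vmla chain could otherwise wrap the 16-bit intermediate.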
void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
@@ -44,168 +129,565 @@ void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
const int16_t *filter_y, // unused
int y_step_q4, // unused
int w, int h) {
- int width;
- const uint8_t *s, *psrc;
- uint8_t *d, *pdst;
- uint8x8_t d2u8, d3u8, d24u8, d25u8, d26u8, d27u8, d28u8, d29u8;
- uint32x2_t d2u32, d3u32, d28u32, d29u32, d30u32, d31u32;
- uint8x16_t q12u8, q13u8, q14u8, q15u8;
- int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d22s16, d23s16;
- int16x4_t d24s16, d25s16, d26s16, d27s16;
- uint16x4_t d2u16, d3u16, d4u16, d5u16, d16u16, d17u16, d18u16, d19u16;
- int16x8_t q0s16;
- uint16x8_t q1u16, q2u16, q8u16, q9u16, q10u16, q11u16, q12u16, q13u16;
- int32x4_t q1s32, q2s32, q14s32, q15s32;
- uint16x8x2_t q0x2u16;
- uint8x8x2_t d0x2u8, d1x2u8;
- uint32x2x2_t d0x2u32;
- uint16x4x2_t d0x2u16, d1x2u16;
- uint32x4x2_t q0x2u32;
+ const int16x8_t filters = vld1q_s16(filter_x);
+ uint8x8_t t0, t1, t2, t3;
+ assert(!((intptr_t)dst & 3));
+ assert(!(dst_stride & 3));
assert(x_step_q4 == 16);
(void)x_step_q4;
(void)y_step_q4;
(void)filter_y;
- q0s16 = vld1q_s16(filter_x);
-
- src -= 3; // adjust for taps
- for (; h > 0; h -= 4, src += src_stride * 4,
- dst += dst_stride * 4) { // loop_horiz_v
- s = src;
- d24u8 = vld1_u8(s);
- s += src_stride;
- d25u8 = vld1_u8(s);
- s += src_stride;
- d26u8 = vld1_u8(s);
- s += src_stride;
- d27u8 = vld1_u8(s);
-
- q12u8 = vcombine_u8(d24u8, d25u8);
- q13u8 = vcombine_u8(d26u8, d27u8);
-
- q0x2u16 =
- vtrnq_u16(vreinterpretq_u16_u8(q12u8), vreinterpretq_u16_u8(q13u8));
- d24u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[0]));
- d25u8 = vreinterpret_u8_u16(vget_high_u16(q0x2u16.val[0]));
- d26u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[1]));
- d27u8 = vreinterpret_u8_u16(vget_high_u16(q0x2u16.val[1]));
- d0x2u8 = vtrn_u8(d24u8, d25u8);
- d1x2u8 = vtrn_u8(d26u8, d27u8);
-
- __builtin_prefetch(src + src_stride * 4);
- __builtin_prefetch(src + src_stride * 5);
- __builtin_prefetch(src + src_stride * 6);
-
- q8u16 = vmovl_u8(d0x2u8.val[0]);
- q9u16 = vmovl_u8(d0x2u8.val[1]);
- q10u16 = vmovl_u8(d1x2u8.val[0]);
- q11u16 = vmovl_u8(d1x2u8.val[1]);
-
- d16u16 = vget_low_u16(q8u16);
- d17u16 = vget_high_u16(q8u16);
- d18u16 = vget_low_u16(q9u16);
- d19u16 = vget_high_u16(q9u16);
- q8u16 = vcombine_u16(d16u16, d18u16); // vswp 17 18
- q9u16 = vcombine_u16(d17u16, d19u16);
-
- d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16));
- d23s16 = vreinterpret_s16_u16(vget_high_u16(q10u16)); // vmov 23 21
- for (width = w, psrc = src + 7, pdst = dst; width > 0;
- width -= 4, psrc += 4, pdst += 4) { // loop_horiz
- s = psrc;
- d28u32 = vld1_dup_u32((const uint32_t *)s);
- s += src_stride;
- d29u32 = vld1_dup_u32((const uint32_t *)s);
- s += src_stride;
- d31u32 = vld1_dup_u32((const uint32_t *)s);
- s += src_stride;
- d30u32 = vld1_dup_u32((const uint32_t *)s);
-
- __builtin_prefetch(psrc + 64);
-
- d0x2u16 =
- vtrn_u16(vreinterpret_u16_u32(d28u32), vreinterpret_u16_u32(d31u32));
- d1x2u16 =
- vtrn_u16(vreinterpret_u16_u32(d29u32), vreinterpret_u16_u32(d30u32));
- d0x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[0]), // d28
- vreinterpret_u8_u16(d1x2u16.val[0])); // d29
- d1x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[1]), // d31
- vreinterpret_u8_u16(d1x2u16.val[1])); // d30
-
- __builtin_prefetch(psrc + 64 + src_stride);
-
- q14u8 = vcombine_u8(d0x2u8.val[0], d0x2u8.val[1]);
- q15u8 = vcombine_u8(d1x2u8.val[1], d1x2u8.val[0]);
- q0x2u32 =
- vtrnq_u32(vreinterpretq_u32_u8(q14u8), vreinterpretq_u32_u8(q15u8));
-
- d28u8 = vreinterpret_u8_u32(vget_low_u32(q0x2u32.val[0]));
- d29u8 = vreinterpret_u8_u32(vget_high_u32(q0x2u32.val[0]));
- q12u16 = vmovl_u8(d28u8);
- q13u16 = vmovl_u8(d29u8);
-
- __builtin_prefetch(psrc + 64 + src_stride * 2);
-
- d16s16 = vreinterpret_s16_u16(vget_low_u16(q8u16));
- d17s16 = vreinterpret_s16_u16(vget_high_u16(q8u16));
- d18s16 = vreinterpret_s16_u16(vget_low_u16(q9u16));
- d19s16 = vreinterpret_s16_u16(vget_high_u16(q9u16));
- d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
- d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
- d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
- d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
- d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-
- q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d20s16, d22s16, d18s16, d19s16,
- d23s16, d24s16, q0s16);
- q2s32 = MULTIPLY_BY_Q0(d17s16, d20s16, d22s16, d18s16, d19s16, d23s16,
- d24s16, d26s16, q0s16);
- q14s32 = MULTIPLY_BY_Q0(d20s16, d22s16, d18s16, d19s16, d23s16, d24s16,
- d26s16, d27s16, q0s16);
- q15s32 = MULTIPLY_BY_Q0(d22s16, d18s16, d19s16, d23s16, d24s16, d26s16,
- d27s16, d25s16, q0s16);
-
- __builtin_prefetch(psrc + 60 + src_stride * 3);
-
- d2u16 = vqrshrun_n_s32(q1s32, 7);
- d3u16 = vqrshrun_n_s32(q2s32, 7);
- d4u16 = vqrshrun_n_s32(q14s32, 7);
- d5u16 = vqrshrun_n_s32(q15s32, 7);
-
- q1u16 = vcombine_u16(d2u16, d3u16);
- q2u16 = vcombine_u16(d4u16, d5u16);
-
- d2u8 = vqmovn_u16(q1u16);
- d3u8 = vqmovn_u16(q2u16);
-
- d0x2u16 = vtrn_u16(vreinterpret_u16_u8(d2u8), vreinterpret_u16_u8(d3u8));
- d0x2u32 = vtrn_u32(vreinterpret_u32_u16(d0x2u16.val[0]),
- vreinterpret_u32_u16(d0x2u16.val[1]));
- d0x2u8 = vtrn_u8(vreinterpret_u8_u32(d0x2u32.val[0]),
- vreinterpret_u8_u32(d0x2u32.val[1]));
-
- d2u32 = vreinterpret_u32_u8(d0x2u8.val[0]);
- d3u32 = vreinterpret_u32_u8(d0x2u8.val[1]);
-
- d = pdst;
- vst1_lane_u32((uint32_t *)d, d2u32, 0);
- d += dst_stride;
- vst1_lane_u32((uint32_t *)d, d3u32, 0);
- d += dst_stride;
- vst1_lane_u32((uint32_t *)d, d2u32, 1);
- d += dst_stride;
- vst1_lane_u32((uint32_t *)d, d3u32, 1);
-
- q8u16 = q9u16;
- d20s16 = d23s16;
- q11u16 = q12u16;
- q9u16 = q13u16;
- d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
+ src -= 3;
+
+ if (h == 4) {
+ uint8x8_t d01, d23;
+ int16x4_t filter3, filter4, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0,
+ d1, d2, d3;
+ int16x8_t tt0, tt1, tt2, tt3;
+
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
+ filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
+ load_8x4(src, src_stride, &t0, &t1, &t2, &t3);
+ transpose_u8_8x4(&t0, &t1, &t2, &t3);
+ tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ tt1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ tt2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ tt3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s0 = vget_low_s16(tt0);
+ s1 = vget_low_s16(tt1);
+ s2 = vget_low_s16(tt2);
+ s3 = vget_low_s16(tt3);
+ s4 = vget_high_s16(tt0);
+ s5 = vget_high_s16(tt1);
+ s6 = vget_high_s16(tt2);
+ __builtin_prefetch(dst + 0 * dst_stride);
+ __builtin_prefetch(dst + 1 * dst_stride);
+ __builtin_prefetch(dst + 2 * dst_stride);
+ __builtin_prefetch(dst + 3 * dst_stride);
+ src += 7;
+
+ do {
+ load_8x4(src, src_stride, &t0, &t1, &t2, &t3);
+ transpose_u8_8x4(&t0, &t1, &t2, &t3);
+ tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ tt1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ tt2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ tt3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s7 = vget_low_s16(tt0);
+ s8 = vget_low_s16(tt1);
+ s9 = vget_low_s16(tt2);
+ s10 = vget_low_s16(tt3);
+
+ d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
+ filter4);
+ d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
+ filter4);
+ d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
+ filter4);
+ d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
+ filter4);
+
+ d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), 7);
+ d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), 7);
+ transpose_u8_4x4(&d01, &d23);
+
+ vst1_lane_u32((uint32_t *)(dst + 0 * dst_stride),
+ vreinterpret_u32_u8(d01), 0);
+ vst1_lane_u32((uint32_t *)(dst + 1 * dst_stride),
+ vreinterpret_u32_u8(d23), 0);
+ vst1_lane_u32((uint32_t *)(dst + 2 * dst_stride),
+ vreinterpret_u32_u8(d01), 1);
+ vst1_lane_u32((uint32_t *)(dst + 3 * dst_stride),
+ vreinterpret_u32_u8(d23), 1);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+ s5 = s9;
+ s6 = s10;
+ src += 4;
+ dst += 4;
+ w -= 4;
+ } while (w > 0);
+ } else {
+ const int16x8_t filter3 = vdupq_lane_s16(vget_low_s16(filters), 3);
+ const int16x8_t filter4 = vdupq_lane_s16(vget_high_s16(filters), 0);
+ int width;
+ const uint8_t *s;
+ uint8x8_t t4, t5, t6, t7;
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, d1, d2, d3;
+
+ if (w == 4) {
+ do {
+ load_8x8(src, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ s0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s4 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s5 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s6 = vreinterpretq_s16_u16(vmovl_u8(t6));
+
+ load_8x8(src + 7, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ src += 8 * src_stride;
+ __builtin_prefetch(dst + 0 * dst_stride);
+ __builtin_prefetch(dst + 1 * dst_stride);
+ __builtin_prefetch(dst + 2 * dst_stride);
+ __builtin_prefetch(dst + 3 * dst_stride);
+ __builtin_prefetch(dst + 4 * dst_stride);
+ __builtin_prefetch(dst + 5 * dst_stride);
+ __builtin_prefetch(dst + 6 * dst_stride);
+ __builtin_prefetch(dst + 7 * dst_stride);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s8 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s9 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s10 = vreinterpretq_s16_u16(vmovl_u8(t3));
+
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ __builtin_prefetch(src + 4 * src_stride);
+ __builtin_prefetch(src + 5 * src_stride);
+ __builtin_prefetch(src + 6 * src_stride);
+ __builtin_prefetch(src + 7 * src_stride);
+ d0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
+ filter4);
+ d1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
+ filter4);
+ d2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
+ filter4);
+ d3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
+ filter4);
+
+ t0 = vqrshrun_n_s16(d0, 7);
+ t1 = vqrshrun_n_s16(d1, 7);
+ t2 = vqrshrun_n_s16(d2, 7);
+ t3 = vqrshrun_n_s16(d3, 7);
+ transpose_u8_8x4(&t0, &t1, &t2, &t3);
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t0), 0);
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t1), 0);
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t2), 0);
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t3), 0);
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t0), 1);
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t1), 1);
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t2), 1);
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t3), 1);
+ dst += dst_stride;
+ h -= 8;
+ } while (h > 0);
+ } else {
+ uint8_t *d;
+ int16x8_t s11, s12, s13, s14, d4, d5, d6, d7;
+
+ do {
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ __builtin_prefetch(src + 4 * src_stride);
+ __builtin_prefetch(src + 5 * src_stride);
+ __builtin_prefetch(src + 6 * src_stride);
+ __builtin_prefetch(src + 7 * src_stride);
+ load_8x8(src, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ s0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s4 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s5 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s6 = vreinterpretq_s16_u16(vmovl_u8(t6));
+
+ width = w;
+ s = src + 7;
+ d = dst;
+ __builtin_prefetch(dst + 0 * dst_stride);
+ __builtin_prefetch(dst + 1 * dst_stride);
+ __builtin_prefetch(dst + 2 * dst_stride);
+ __builtin_prefetch(dst + 3 * dst_stride);
+ __builtin_prefetch(dst + 4 * dst_stride);
+ __builtin_prefetch(dst + 5 * dst_stride);
+ __builtin_prefetch(dst + 6 * dst_stride);
+ __builtin_prefetch(dst + 7 * dst_stride);
+
+ do {
+ load_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s8 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s9 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s10 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s11 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s12 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s13 = vreinterpretq_s16_u16(vmovl_u8(t6));
+ s14 = vreinterpretq_s16_u16(vmovl_u8(t7));
+
+ d0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
+ filter4);
+ d1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
+ filter4);
+ d2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
+ filter4);
+ d3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
+ filter4);
+ d4 = convolve8_8(s4, s5, s6, s7, s8, s9, s10, s11, filters, filter3,
+ filter4);
+ d5 = convolve8_8(s5, s6, s7, s8, s9, s10, s11, s12, filters, filter3,
+ filter4);
+ d6 = convolve8_8(s6, s7, s8, s9, s10, s11, s12, s13, filters, filter3,
+ filter4);
+ d7 = convolve8_8(s7, s8, s9, s10, s11, s12, s13, s14, filters,
+ filter3, filter4);
+
+ t0 = vqrshrun_n_s16(d0, 7);
+ t1 = vqrshrun_n_s16(d1, 7);
+ t2 = vqrshrun_n_s16(d2, 7);
+ t3 = vqrshrun_n_s16(d3, 7);
+ t4 = vqrshrun_n_s16(d4, 7);
+ t5 = vqrshrun_n_s16(d5, 7);
+ t6 = vqrshrun_n_s16(d6, 7);
+ t7 = vqrshrun_n_s16(d7, 7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ store_8x8(d, dst_stride, t0, t1, t2, t3, t4, t5, t6, t7);
+
+ s0 = s8;
+ s1 = s9;
+ s2 = s10;
+ s3 = s11;
+ s4 = s12;
+ s5 = s13;
+ s6 = s14;
+ s += 8;
+ d += 8;
+ width -= 8;
+ } while (width > 0);
+ src += 8 * src_stride;
+ dst += 8 * dst_stride;
+ h -= 8;
+ } while (h > 0);
+ }
+ }
+}
+
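The _avg variants that follow compute the same filter output and then blend it with the bytes already in dst using vrhaddq_u8, a rounding halving add:

    dst[i] = (dst[i] + out[i] + 1) >> 1

which is the standard compound-prediction average.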
+void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, // unused
+ int y_step_q4, // unused
+ int w, int h) {
+ const int16x8_t filters = vld1q_s16(filter_x);
+ uint8x8_t t0, t1, t2, t3;
+
+ assert(!((intptr_t)dst & 3));
+ assert(!(dst_stride & 3));
+ assert(x_step_q4 == 16);
+
+ (void)x_step_q4;
+ (void)y_step_q4;
+ (void)filter_y;
+
+ src -= 3;
+
+ if (h == 4) {
+ uint8x8_t d01, d23;
+ int16x4_t filter3, filter4, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0,
+ d1, d2, d3;
+ int16x8_t tt0, tt1, tt2, tt3;
+ uint32x4_t d0123 = vdupq_n_u32(0);
+
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
+ filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
+ load_8x4(src, src_stride, &t0, &t1, &t2, &t3);
+ transpose_u8_8x4(&t0, &t1, &t2, &t3);
+ tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ tt1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ tt2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ tt3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s0 = vget_low_s16(tt0);
+ s1 = vget_low_s16(tt1);
+ s2 = vget_low_s16(tt2);
+ s3 = vget_low_s16(tt3);
+ s4 = vget_high_s16(tt0);
+ s5 = vget_high_s16(tt1);
+ s6 = vget_high_s16(tt2);
+ __builtin_prefetch(dst + 0 * dst_stride);
+ __builtin_prefetch(dst + 1 * dst_stride);
+ __builtin_prefetch(dst + 2 * dst_stride);
+ __builtin_prefetch(dst + 3 * dst_stride);
+ src += 7;
+
+ do {
+ load_8x4(src, src_stride, &t0, &t1, &t2, &t3);
+ transpose_u8_8x4(&t0, &t1, &t2, &t3);
+ tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ tt1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ tt2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ tt3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s7 = vget_low_s16(tt0);
+ s8 = vget_low_s16(tt1);
+ s9 = vget_low_s16(tt2);
+ s10 = vget_low_s16(tt3);
+
+ d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
+ filter4);
+ d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
+ filter4);
+ d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
+ filter4);
+ d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
+ filter4);
+
+ d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), 7);
+ d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), 7);
+ transpose_u8_4x4(&d01, &d23);
+
+ d0123 = vld1q_lane_u32((uint32_t *)(dst + 0 * dst_stride), d0123, 0);
+ d0123 = vld1q_lane_u32((uint32_t *)(dst + 1 * dst_stride), d0123, 2);
+ d0123 = vld1q_lane_u32((uint32_t *)(dst + 2 * dst_stride), d0123, 1);
+ d0123 = vld1q_lane_u32((uint32_t *)(dst + 3 * dst_stride), d0123, 3);
+ d0123 = vreinterpretq_u32_u8(
+ vrhaddq_u8(vreinterpretq_u8_u32(d0123), vcombine_u8(d01, d23)));
+
+ vst1q_lane_u32((uint32_t *)(dst + 0 * dst_stride), d0123, 0);
+ vst1q_lane_u32((uint32_t *)(dst + 1 * dst_stride), d0123, 2);
+ vst1q_lane_u32((uint32_t *)(dst + 2 * dst_stride), d0123, 1);
+ vst1q_lane_u32((uint32_t *)(dst + 3 * dst_stride), d0123, 3);
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+ s5 = s9;
+ s6 = s10;
+ src += 4;
+ dst += 4;
+ w -= 4;
+ } while (w > 0);
+ } else {
+ const int16x8_t filter3 = vdupq_lane_s16(vget_low_s16(filters), 3);
+ const int16x8_t filter4 = vdupq_lane_s16(vget_high_s16(filters), 0);
+ int width;
+ const uint8_t *s;
+ uint8x8_t t4, t5, t6, t7;
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, d1, d2, d3;
+
+ if (w == 4) {
+ uint32x4_t d0415 = vdupq_n_u32(0);
+ uint32x4_t d2637 = vdupq_n_u32(0);
+ do {
+ load_8x8(src, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ s0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s4 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s5 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s6 = vreinterpretq_s16_u16(vmovl_u8(t6));
+
+ load_8x8(src + 7, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ src += 8 * src_stride;
+ __builtin_prefetch(dst + 0 * dst_stride);
+ __builtin_prefetch(dst + 1 * dst_stride);
+ __builtin_prefetch(dst + 2 * dst_stride);
+ __builtin_prefetch(dst + 3 * dst_stride);
+ __builtin_prefetch(dst + 4 * dst_stride);
+ __builtin_prefetch(dst + 5 * dst_stride);
+ __builtin_prefetch(dst + 6 * dst_stride);
+ __builtin_prefetch(dst + 7 * dst_stride);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s8 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s9 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s10 = vreinterpretq_s16_u16(vmovl_u8(t3));
+
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ __builtin_prefetch(src + 4 * src_stride);
+ __builtin_prefetch(src + 5 * src_stride);
+ __builtin_prefetch(src + 6 * src_stride);
+ __builtin_prefetch(src + 7 * src_stride);
+ d0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
+ filter4);
+ d1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
+ filter4);
+ d2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
+ filter4);
+ d3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
+ filter4);
+
+ t0 = vqrshrun_n_s16(d0, 7);
+ t1 = vqrshrun_n_s16(d1, 7);
+ t2 = vqrshrun_n_s16(d2, 7);
+ t3 = vqrshrun_n_s16(d3, 7);
+ transpose_u8_8x4(&t0, &t1, &t2, &t3);
+
+ d0415 = vld1q_lane_u32((uint32_t *)(dst + 0 * dst_stride), d0415, 0);
+ d0415 = vld1q_lane_u32((uint32_t *)(dst + 1 * dst_stride), d0415, 2);
+ d2637 = vld1q_lane_u32((uint32_t *)(dst + 2 * dst_stride), d2637, 0);
+ d2637 = vld1q_lane_u32((uint32_t *)(dst + 3 * dst_stride), d2637, 2);
+ d0415 = vld1q_lane_u32((uint32_t *)(dst + 4 * dst_stride), d0415, 1);
+ d0415 = vld1q_lane_u32((uint32_t *)(dst + 5 * dst_stride), d0415, 3);
+ d2637 = vld1q_lane_u32((uint32_t *)(dst + 6 * dst_stride), d2637, 1);
+ d2637 = vld1q_lane_u32((uint32_t *)(dst + 7 * dst_stride), d2637, 3);
+ d0415 = vreinterpretq_u32_u8(
+ vrhaddq_u8(vreinterpretq_u8_u32(d0415), vcombine_u8(t0, t1)));
+ d2637 = vreinterpretq_u32_u8(
+ vrhaddq_u8(vreinterpretq_u8_u32(d2637), vcombine_u8(t2, t3)));
+
+ vst1q_lane_u32((uint32_t *)dst, d0415, 0);
+ dst += dst_stride;
+ vst1q_lane_u32((uint32_t *)dst, d0415, 2);
+ dst += dst_stride;
+ vst1q_lane_u32((uint32_t *)dst, d2637, 0);
+ dst += dst_stride;
+ vst1q_lane_u32((uint32_t *)dst, d2637, 2);
+ dst += dst_stride;
+ vst1q_lane_u32((uint32_t *)dst, d0415, 1);
+ dst += dst_stride;
+ vst1q_lane_u32((uint32_t *)dst, d0415, 3);
+ dst += dst_stride;
+ vst1q_lane_u32((uint32_t *)dst, d2637, 1);
+ dst += dst_stride;
+ vst1q_lane_u32((uint32_t *)dst, d2637, 3);
+ dst += dst_stride;
+ h -= 8;
+ } while (h > 0);
+ } else {
+ uint8_t *d;
+ int16x8_t s11, s12, s13, s14, d4, d5, d6, d7;
+ uint8x16_t d01, d23, d45, d67;
+
+ do {
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ __builtin_prefetch(src + 4 * src_stride);
+ __builtin_prefetch(src + 5 * src_stride);
+ __builtin_prefetch(src + 6 * src_stride);
+ __builtin_prefetch(src + 7 * src_stride);
+ load_8x8(src, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ s0 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s1 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s2 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s3 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s4 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s5 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s6 = vreinterpretq_s16_u16(vmovl_u8(t6));
+
+ width = w;
+ s = src + 7;
+ d = dst;
+ __builtin_prefetch(dst + 0 * dst_stride);
+ __builtin_prefetch(dst + 1 * dst_stride);
+ __builtin_prefetch(dst + 2 * dst_stride);
+ __builtin_prefetch(dst + 3 * dst_stride);
+ __builtin_prefetch(dst + 4 * dst_stride);
+ __builtin_prefetch(dst + 5 * dst_stride);
+ __builtin_prefetch(dst + 6 * dst_stride);
+ __builtin_prefetch(dst + 7 * dst_stride);
+
+ do {
+ load_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+ s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
+ s8 = vreinterpretq_s16_u16(vmovl_u8(t1));
+ s9 = vreinterpretq_s16_u16(vmovl_u8(t2));
+ s10 = vreinterpretq_s16_u16(vmovl_u8(t3));
+ s11 = vreinterpretq_s16_u16(vmovl_u8(t4));
+ s12 = vreinterpretq_s16_u16(vmovl_u8(t5));
+ s13 = vreinterpretq_s16_u16(vmovl_u8(t6));
+ s14 = vreinterpretq_s16_u16(vmovl_u8(t7));
+
+ d0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
+ filter4);
+ d1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
+ filter4);
+ d2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
+ filter4);
+ d3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
+ filter4);
+ d4 = convolve8_8(s4, s5, s6, s7, s8, s9, s10, s11, filters, filter3,
+ filter4);
+ d5 = convolve8_8(s5, s6, s7, s8, s9, s10, s11, s12, filters, filter3,
+ filter4);
+ d6 = convolve8_8(s6, s7, s8, s9, s10, s11, s12, s13, filters, filter3,
+ filter4);
+ d7 = convolve8_8(s7, s8, s9, s10, s11, s12, s13, s14, filters,
+ filter3, filter4);
+
+ t0 = vqrshrun_n_s16(d0, 7);
+ t1 = vqrshrun_n_s16(d1, 7);
+ t2 = vqrshrun_n_s16(d2, 7);
+ t3 = vqrshrun_n_s16(d3, 7);
+ t4 = vqrshrun_n_s16(d4, 7);
+ t5 = vqrshrun_n_s16(d5, 7);
+ t6 = vqrshrun_n_s16(d6, 7);
+ t7 = vqrshrun_n_s16(d7, 7);
+ transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+
+ d01 = vcombine_u8(vld1_u8(d + 0 * dst_stride),
+ vld1_u8(d + 1 * dst_stride));
+ d23 = vcombine_u8(vld1_u8(d + 2 * dst_stride),
+ vld1_u8(d + 3 * dst_stride));
+ d45 = vcombine_u8(vld1_u8(d + 4 * dst_stride),
+ vld1_u8(d + 5 * dst_stride));
+ d67 = vcombine_u8(vld1_u8(d + 6 * dst_stride),
+ vld1_u8(d + 7 * dst_stride));
+ d01 = vrhaddq_u8(d01, vcombine_u8(t0, t1));
+ d23 = vrhaddq_u8(d23, vcombine_u8(t2, t3));
+ d45 = vrhaddq_u8(d45, vcombine_u8(t4, t5));
+ d67 = vrhaddq_u8(d67, vcombine_u8(t6, t7));
+
+ store_8x8(d, dst_stride, vget_low_u8(d01), vget_high_u8(d01),
+ vget_low_u8(d23), vget_high_u8(d23), vget_low_u8(d45),
+ vget_high_u8(d45), vget_low_u8(d67), vget_high_u8(d67));
+
+ s0 = s8;
+ s1 = s9;
+ s2 = s10;
+ s3 = s11;
+ s4 = s12;
+ s5 = s13;
+ s6 = s14;
+ s += 8;
+ d += 8;
+ width -= 8;
+ } while (width > 0);
+ src += 8 * src_stride;
+ dst += 8 * dst_stride;
+ h -= 8;
+ } while (h > 0);
}
}
- return;
}
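In the averaging 4-wide paths above, the dst rows are loaded into lanes 0, 2, 1, 3 (and split across d0415/d2637 in the w == 4 branch) because the transposes leave the output rows interleaved: after transpose_u8_4x4, d01 holds rows 0 and 2 and d23 holds rows 1 and 3, so

    vcombine_u8(d01, d23) lanes = [row 0, row 2, row 1, row 3]

and loading dst in the matching lane order lets a single vrhaddq_u8 average all four rows at once.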
void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
@@ -214,117 +696,359 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
int x_step_q4, // unused
const int16_t *filter_y, int y_step_q4, int w,
int h) {
- int height;
- const uint8_t *s;
- uint8_t *d;
- uint32x2_t d2u32, d3u32;
- uint32x2_t d16u32, d18u32, d20u32, d22u32, d24u32, d26u32;
- int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16;
- int16x4_t d24s16, d25s16, d26s16, d27s16;
- uint16x4_t d2u16, d3u16, d4u16, d5u16;
- int16x8_t q0s16;
- uint16x8_t q1u16, q2u16, q8u16, q9u16, q10u16, q11u16, q12u16, q13u16;
- int32x4_t q1s32, q2s32, q14s32, q15s32;
+ const int16x8_t filters = vld1q_s16(filter_y);
+ assert(!((intptr_t)dst & 3));
+ assert(!(dst_stride & 3));
assert(y_step_q4 == 16);
(void)x_step_q4;
(void)y_step_q4;
(void)filter_x;
- src -= src_stride * 3;
- q0s16 = vld1q_s16(filter_y);
- for (; w > 0; w -= 4, src += 4, dst += 4) { // loop_vert_h
- s = src;
- d16u32 = vld1_lane_u32((const uint32_t *)s, d16u32, 0);
- s += src_stride;
- d16u32 = vld1_lane_u32((const uint32_t *)s, d16u32, 1);
- s += src_stride;
- d18u32 = vld1_lane_u32((const uint32_t *)s, d18u32, 0);
- s += src_stride;
- d18u32 = vld1_lane_u32((const uint32_t *)s, d18u32, 1);
- s += src_stride;
- d20u32 = vld1_lane_u32((const uint32_t *)s, d20u32, 0);
- s += src_stride;
- d20u32 = vld1_lane_u32((const uint32_t *)s, d20u32, 1);
- s += src_stride;
- d22u32 = vld1_lane_u32((const uint32_t *)s, d22u32, 0);
- s += src_stride;
-
- q8u16 = vmovl_u8(vreinterpret_u8_u32(d16u32));
- q9u16 = vmovl_u8(vreinterpret_u8_u32(d18u32));
- q10u16 = vmovl_u8(vreinterpret_u8_u32(d20u32));
- q11u16 = vmovl_u8(vreinterpret_u8_u32(d22u32));
-
- d18s16 = vreinterpret_s16_u16(vget_low_u16(q9u16));
- d19s16 = vreinterpret_s16_u16(vget_high_u16(q9u16));
- d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
- d = dst;
- for (height = h; height > 0; height -= 4) { // loop_vert
- d24u32 = vld1_lane_u32((const uint32_t *)s, d24u32, 0);
+ src -= 3 * src_stride;
+
+ if (w == 4) {
+ const int16x4_t filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
+ const int16x4_t filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
+ uint8x8_t d01, d23;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, d1, d2, d3;
+
+ s0 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s1 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s2 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s3 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s4 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s6 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+
+ do {
+ s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s8 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s9 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s10 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+
+ __builtin_prefetch(dst + 0 * dst_stride);
+ __builtin_prefetch(dst + 1 * dst_stride);
+ __builtin_prefetch(dst + 2 * dst_stride);
+ __builtin_prefetch(dst + 3 * dst_stride);
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
+ filter4);
+ d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
+ filter4);
+ d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
+ filter4);
+ d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
+ filter4);
+
+ d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), 7);
+ d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), 7);
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d01), 0);
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d01), 1);
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d23), 0);
+ dst += dst_stride;
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d23), 1);
+ dst += dst_stride;
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+ s5 = s9;
+ s6 = s10;
+ h -= 4;
+ } while (h > 0);
+ } else {
+ const int16x8_t filter3 = vdupq_lane_s16(vget_low_s16(filters), 3);
+ const int16x8_t filter4 = vdupq_lane_s16(vget_high_s16(filters), 0);
+ int height;
+ const uint8_t *s;
+ uint8_t *d;
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, d1, d2, d3;
+
+ do {
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ __builtin_prefetch(src + 4 * src_stride);
+ __builtin_prefetch(src + 5 * src_stride);
+ __builtin_prefetch(src + 6 * src_stride);
+ s = src;
+ s0 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s1 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s2 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
s += src_stride;
- d26u32 = vld1_lane_u32((const uint32_t *)s, d26u32, 0);
+ s3 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
s += src_stride;
- d26u32 = vld1_lane_u32((const uint32_t *)s, d26u32, 1);
+ s4 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
s += src_stride;
- d24u32 = vld1_lane_u32((const uint32_t *)s, d24u32, 1);
+ s5 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
s += src_stride;
+ s6 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ d = dst;
+ height = h;
- q12u16 = vmovl_u8(vreinterpret_u8_u32(d24u32));
- q13u16 = vmovl_u8(vreinterpret_u8_u32(d26u32));
-
- d16s16 = vreinterpret_s16_u16(vget_low_u16(q8u16));
- d17s16 = vreinterpret_s16_u16(vget_high_u16(q8u16));
- d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16));
- d21s16 = vreinterpret_s16_u16(vget_high_u16(q10u16));
- d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
- d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
- d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
- d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-
- __builtin_prefetch(d);
- __builtin_prefetch(d + dst_stride);
- q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d18s16, d19s16, d20s16, d21s16,
- d22s16, d24s16, q0s16);
- __builtin_prefetch(d + dst_stride * 2);
- __builtin_prefetch(d + dst_stride * 3);
- q2s32 = MULTIPLY_BY_Q0(d17s16, d18s16, d19s16, d20s16, d21s16, d22s16,
- d24s16, d26s16, q0s16);
- __builtin_prefetch(s);
- __builtin_prefetch(s + src_stride);
- q14s32 = MULTIPLY_BY_Q0(d18s16, d19s16, d20s16, d21s16, d22s16, d24s16,
- d26s16, d27s16, q0s16);
- __builtin_prefetch(s + src_stride * 2);
- __builtin_prefetch(s + src_stride * 3);
- q15s32 = MULTIPLY_BY_Q0(d19s16, d20s16, d21s16, d22s16, d24s16, d26s16,
- d27s16, d25s16, q0s16);
-
- d2u16 = vqrshrun_n_s32(q1s32, 7);
- d3u16 = vqrshrun_n_s32(q2s32, 7);
- d4u16 = vqrshrun_n_s32(q14s32, 7);
- d5u16 = vqrshrun_n_s32(q15s32, 7);
-
- q1u16 = vcombine_u16(d2u16, d3u16);
- q2u16 = vcombine_u16(d4u16, d5u16);
-
- d2u32 = vreinterpret_u32_u8(vqmovn_u16(q1u16));
- d3u32 = vreinterpret_u32_u8(vqmovn_u16(q2u16));
-
- vst1_lane_u32((uint32_t *)d, d2u32, 0);
- d += dst_stride;
- vst1_lane_u32((uint32_t *)d, d2u32, 1);
- d += dst_stride;
- vst1_lane_u32((uint32_t *)d, d3u32, 0);
- d += dst_stride;
- vst1_lane_u32((uint32_t *)d, d3u32, 1);
- d += dst_stride;
-
- q8u16 = q10u16;
- d18s16 = d22s16;
- d19s16 = d24s16;
- q10u16 = q13u16;
- d22s16 = d25s16;
- }
+ do {
+ s7 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s8 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s9 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s10 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+
+ __builtin_prefetch(dst + 0 * dst_stride);
+ __builtin_prefetch(dst + 1 * dst_stride);
+ __builtin_prefetch(dst + 2 * dst_stride);
+ __builtin_prefetch(dst + 3 * dst_stride);
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ d0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
+ filter4);
+ d1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
+ filter4);
+ d2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
+ filter4);
+ d3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
+ filter4);
+
+ vst1_u8(d, vqrshrun_n_s16(d0, 7));
+ d += dst_stride;
+ vst1_u8(d, vqrshrun_n_s16(d1, 7));
+ d += dst_stride;
+ vst1_u8(d, vqrshrun_n_s16(d2, 7));
+ d += dst_stride;
+ vst1_u8(d, vqrshrun_n_s16(d3, 7));
+ d += dst_stride;
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+ s5 = s9;
+ s6 = s10;
+ height -= 4;
+ } while (height > 0);
+ src += 8;
+ dst += 8;
+ w -= 8;
+ } while (w > 0);
+ }
+}
+
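Both vertical kernels (the one above and the averaging one below) keep a sliding window of the last seven rows (s0..s6), load four new rows per iteration, and shift the window down by four afterwards. A scalar reference model of what they compute (a hypothetical helper for illustration, not part of the patch):

    #include <stdint.h>

    static void convolve8_vert_ref(const uint8_t *src, int src_stride,
                                   uint8_t *dst, int dst_stride,
                                   const int16_t *filter, /* 8 taps */
                                   int w, int h) {
      int x, y, k;
      src -= 3 * src_stride;  /* same top adjustment as the NEON code */
      for (y = 0; y < h; ++y) {
        for (x = 0; x < w; ++x) {
          int sum = 0;
          for (k = 0; k < 8; ++k)
            sum += filter[k] * src[(y + k) * src_stride + x];
          sum = (sum + 64) >> 7;  /* round, then divide by 128 */
          dst[y * dst_stride + x] =
              (uint8_t)(sum < 0 ? 0 : sum > 255 ? 255 : sum);
        }
      }
    }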
+void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, // unused
+ int x_step_q4, // unused
+ const int16_t *filter_y, int y_step_q4, int w,
+ int h) {
+ const int16x8_t filters = vld1q_s16(filter_y);
+
+ assert(!((intptr_t)dst & 3));
+ assert(!(dst_stride & 3));
+ assert(y_step_q4 == 16);
+
+ (void)x_step_q4;
+ (void)y_step_q4;
+ (void)filter_x;
+
+ src -= 3 * src_stride;
+
+ if (w == 4) {
+ const int16x4_t filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
+ const int16x4_t filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
+ uint8x8_t d01, d23;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, d1, d2, d3;
+ uint32x4_t d0123 = vdupq_n_u32(0);
+
+ s0 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s1 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s2 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s3 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s4 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s5 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s6 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+
+ do {
+ s7 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s8 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s9 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+ s10 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
+ src += src_stride;
+
+ __builtin_prefetch(dst + 0 * dst_stride);
+ __builtin_prefetch(dst + 1 * dst_stride);
+ __builtin_prefetch(dst + 2 * dst_stride);
+ __builtin_prefetch(dst + 3 * dst_stride);
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
+ filter4);
+ d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
+ filter4);
+ d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
+ filter4);
+ d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
+ filter4);
+
+ d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), 7);
+ d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), 7);
+
+ d0123 = vld1q_lane_u32((uint32_t *)(dst + 0 * dst_stride), d0123, 0);
+ d0123 = vld1q_lane_u32((uint32_t *)(dst + 1 * dst_stride), d0123, 1);
+ d0123 = vld1q_lane_u32((uint32_t *)(dst + 2 * dst_stride), d0123, 2);
+ d0123 = vld1q_lane_u32((uint32_t *)(dst + 3 * dst_stride), d0123, 3);
+ d0123 = vreinterpretq_u32_u8(
+ vrhaddq_u8(vreinterpretq_u8_u32(d0123), vcombine_u8(d01, d23)));
+
+ vst1q_lane_u32((uint32_t *)dst, d0123, 0);
+ dst += dst_stride;
+ vst1q_lane_u32((uint32_t *)dst, d0123, 1);
+ dst += dst_stride;
+ vst1q_lane_u32((uint32_t *)dst, d0123, 2);
+ dst += dst_stride;
+ vst1q_lane_u32((uint32_t *)dst, d0123, 3);
+ dst += dst_stride;
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+ s5 = s9;
+ s6 = s10;
+ h -= 4;
+ } while (h > 0);
+ } else {
+ const int16x8_t filter3 = vdupq_lane_s16(vget_low_s16(filters), 3);
+ const int16x8_t filter4 = vdupq_lane_s16(vget_high_s16(filters), 0);
+ int height;
+ const uint8_t *s;
+ uint8_t *d;
+ uint8x16_t d01, d23, dd01, dd23;
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, d1, d2, d3;
+
+ do {
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ __builtin_prefetch(src + 4 * src_stride);
+ __builtin_prefetch(src + 5 * src_stride);
+ __builtin_prefetch(src + 6 * src_stride);
+ s = src;
+ s0 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s1 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s2 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s3 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s4 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s5 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s6 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ d = dst;
+ height = h;
+
+ do {
+ s7 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s8 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s9 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+ s10 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
+ s += src_stride;
+
+ __builtin_prefetch(dst + 0 * dst_stride);
+ __builtin_prefetch(dst + 1 * dst_stride);
+ __builtin_prefetch(dst + 2 * dst_stride);
+ __builtin_prefetch(dst + 3 * dst_stride);
+ __builtin_prefetch(src + 0 * src_stride);
+ __builtin_prefetch(src + 1 * src_stride);
+ __builtin_prefetch(src + 2 * src_stride);
+ __builtin_prefetch(src + 3 * src_stride);
+ d0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
+ filter4);
+ d1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
+ filter4);
+ d2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
+ filter4);
+ d3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
+ filter4);
+
+ d01 = vcombine_u8(vqrshrun_n_s16(d0, 7), vqrshrun_n_s16(d1, 7));
+ d23 = vcombine_u8(vqrshrun_n_s16(d2, 7), vqrshrun_n_s16(d3, 7));
+ dd01 = vcombine_u8(vld1_u8(d + 0 * dst_stride),
+ vld1_u8(d + 1 * dst_stride));
+ dd23 = vcombine_u8(vld1_u8(d + 2 * dst_stride),
+ vld1_u8(d + 3 * dst_stride));
+ dd01 = vrhaddq_u8(dd01, d01);
+ dd23 = vrhaddq_u8(dd23, d23);
+
+ vst1_u8(d, vget_low_u8(dd01));
+ d += dst_stride;
+ vst1_u8(d, vget_high_u8(dd01));
+ d += dst_stride;
+ vst1_u8(d, vget_low_u8(dd23));
+ d += dst_stride;
+ vst1_u8(d, vget_high_u8(dd23));
+ d += dst_stride;
+
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+ s3 = s7;
+ s4 = s8;
+ s5 = s9;
+ s6 = s10;
+ height -= 4;
+ } while (height > 0);
+ src += 8;
+ dst += 8;
+ w -= 8;
+ } while (w > 0);
}
- return;
}
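
A note on the rewritten vertical pass above: each loop iteration keeps a
sliding window of eleven source rows (s0..s10) and emits four output rows,
each an 8-tap sum computed by convolve8_4()/convolve8_8(); since the vp9
subpel filter taps sum to 128 (FILTER_BITS == 7), vqrshrun_n_s16(d, 7)
performs the rounding shift and unsigned-saturating narrow in one step. A
minimal scalar sketch of the same arithmetic, ignoring the intermediate
saturating adds the NEON helpers use for filter3/filter4 (names below are
illustrative, not from the source):

#include <stdint.h>

static uint8_t clip_u8(int v) { return v < 0 ? 0 : (v > 255 ? 255 : v); }

/* src already points 3 rows above the first output row, matching the
 * src -= 3 * src_stride adjustment in the NEON version. */
static void convolve8_vert_ref(const uint8_t *src, int src_stride,
                               uint8_t *dst, int dst_stride,
                               const int16_t *filter, int w, int h) {
  int x, y, k;
  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      int sum = 0;
      for (k = 0; k < 8; ++k) sum += filter[k] * src[(y + k) * src_stride + x];
      dst[y * dst_stride + x] = clip_u8((sum + 64) >> 7); /* round, >> 7 */
    }
  }
}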
diff --git a/vpx_dsp/arm/vpx_convolve_avg_neon.c b/vpx_dsp/arm/vpx_convolve_avg_neon.c
index abc2511ea..04cb835fa 100644
--- a/vpx_dsp/arm/vpx_convolve_avg_neon.c
+++ b/vpx_dsp/arm/vpx_convolve_avg_neon.c
@@ -13,132 +13,127 @@
#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
-void vpx_convolve_avg_neon(const uint8_t *src, // r0
- ptrdiff_t src_stride, // r1
- uint8_t *dst, // r2
- ptrdiff_t dst_stride, // r3
+void vpx_convolve_avg_neon(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int filter_x_stride,
const int16_t *filter_y, int filter_y_stride, int w,
int h) {
- uint8_t *d;
- uint8x8_t d0u8, d1u8, d2u8, d3u8;
- uint32x2_t d0u32, d2u32;
- uint8x16_t q0u8, q1u8, q2u8, q3u8, q8u8, q9u8, q10u8, q11u8;
(void)filter_x;
(void)filter_x_stride;
(void)filter_y;
(void)filter_y_stride;
- d = dst;
- if (w > 32) { // avg64
- for (; h > 0; h -= 1) {
- q0u8 = vld1q_u8(src);
- q1u8 = vld1q_u8(src + 16);
- q2u8 = vld1q_u8(src + 32);
- q3u8 = vld1q_u8(src + 48);
+ if (w < 8) { // avg4
+ uint8x8_t s0, s1;
+ uint8x8_t dd0 = vdup_n_u8(0);
+ uint32x2x2_t s01;
+ do {
+ s0 = vld1_u8(src);
src += src_stride;
- q8u8 = vld1q_u8(d);
- q9u8 = vld1q_u8(d + 16);
- q10u8 = vld1q_u8(d + 32);
- q11u8 = vld1q_u8(d + 48);
- d += dst_stride;
-
- q0u8 = vrhaddq_u8(q0u8, q8u8);
- q1u8 = vrhaddq_u8(q1u8, q9u8);
- q2u8 = vrhaddq_u8(q2u8, q10u8);
- q3u8 = vrhaddq_u8(q3u8, q11u8);
-
- vst1q_u8(dst, q0u8);
- vst1q_u8(dst + 16, q1u8);
- vst1q_u8(dst + 32, q2u8);
- vst1q_u8(dst + 48, q3u8);
+ s1 = vld1_u8(src);
+ src += src_stride;
+ s01 = vzip_u32(vreinterpret_u32_u8(s0), vreinterpret_u32_u8(s1));
+ dd0 = vreinterpret_u8_u32(
+ vld1_lane_u32((const uint32_t *)dst, vreinterpret_u32_u8(dd0), 0));
+ dd0 = vreinterpret_u8_u32(vld1_lane_u32(
+ (const uint32_t *)(dst + dst_stride), vreinterpret_u32_u8(dd0), 1));
+ dd0 = vrhadd_u8(vreinterpret_u8_u32(s01.val[0]), dd0);
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(dd0), 0);
dst += dst_stride;
- }
- } else if (w == 32) { // avg32
- for (; h > 0; h -= 2) {
- q0u8 = vld1q_u8(src);
- q1u8 = vld1q_u8(src + 16);
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(dd0), 1);
+ dst += dst_stride;
+ h -= 2;
+ } while (h > 0);
+ } else if (w == 8) { // avg8
+ uint8x8_t s0, s1, d0, d1;
+ uint8x16_t s01, d01;
+ do {
+ s0 = vld1_u8(src);
src += src_stride;
- q2u8 = vld1q_u8(src);
- q3u8 = vld1q_u8(src + 16);
+ s1 = vld1_u8(src);
src += src_stride;
- q8u8 = vld1q_u8(d);
- q9u8 = vld1q_u8(d + 16);
- d += dst_stride;
- q10u8 = vld1q_u8(d);
- q11u8 = vld1q_u8(d + 16);
- d += dst_stride;
+ d0 = vld1_u8(dst);
+ d1 = vld1_u8(dst + dst_stride);
- q0u8 = vrhaddq_u8(q0u8, q8u8);
- q1u8 = vrhaddq_u8(q1u8, q9u8);
- q2u8 = vrhaddq_u8(q2u8, q10u8);
- q3u8 = vrhaddq_u8(q3u8, q11u8);
+ s01 = vcombine_u8(s0, s1);
+ d01 = vcombine_u8(d0, d1);
+ d01 = vrhaddq_u8(s01, d01);
- vst1q_u8(dst, q0u8);
- vst1q_u8(dst + 16, q1u8);
+ vst1_u8(dst, vget_low_u8(d01));
dst += dst_stride;
- vst1q_u8(dst, q2u8);
- vst1q_u8(dst + 16, q3u8);
+ vst1_u8(dst, vget_high_u8(d01));
dst += dst_stride;
- }
- } else if (w > 8) { // avg16
- for (; h > 0; h -= 2) {
- q0u8 = vld1q_u8(src);
+ h -= 2;
+ } while (h > 0);
+ } else if (w < 32) { // avg16
+ uint8x16_t s0, s1, d0, d1;
+ do {
+ s0 = vld1q_u8(src);
src += src_stride;
- q1u8 = vld1q_u8(src);
+ s1 = vld1q_u8(src);
src += src_stride;
- q2u8 = vld1q_u8(d);
- d += dst_stride;
- q3u8 = vld1q_u8(d);
- d += dst_stride;
+ d0 = vld1q_u8(dst);
+ d1 = vld1q_u8(dst + dst_stride);
- q0u8 = vrhaddq_u8(q0u8, q2u8);
- q1u8 = vrhaddq_u8(q1u8, q3u8);
+ d0 = vrhaddq_u8(s0, d0);
+ d1 = vrhaddq_u8(s1, d1);
- vst1q_u8(dst, q0u8);
+ vst1q_u8(dst, d0);
dst += dst_stride;
- vst1q_u8(dst, q1u8);
+ vst1q_u8(dst, d1);
dst += dst_stride;
- }
- } else if (w == 8) { // avg8
- for (; h > 0; h -= 2) {
- d0u8 = vld1_u8(src);
+ h -= 2;
+ } while (h > 0);
+ } else if (w == 32) { // avg32
+ uint8x16_t s0, s1, s2, s3, d0, d1, d2, d3;
+ do {
+ s0 = vld1q_u8(src);
+ s1 = vld1q_u8(src + 16);
src += src_stride;
- d1u8 = vld1_u8(src);
+ s2 = vld1q_u8(src);
+ s3 = vld1q_u8(src + 16);
src += src_stride;
- d2u8 = vld1_u8(d);
- d += dst_stride;
- d3u8 = vld1_u8(d);
- d += dst_stride;
+ d0 = vld1q_u8(dst);
+ d1 = vld1q_u8(dst + 16);
+ d2 = vld1q_u8(dst + dst_stride);
+ d3 = vld1q_u8(dst + dst_stride + 16);
- q0u8 = vcombine_u8(d0u8, d1u8);
- q1u8 = vcombine_u8(d2u8, d3u8);
- q0u8 = vrhaddq_u8(q0u8, q1u8);
+ d0 = vrhaddq_u8(s0, d0);
+ d1 = vrhaddq_u8(s1, d1);
+ d2 = vrhaddq_u8(s2, d2);
+ d3 = vrhaddq_u8(s3, d3);
- vst1_u8(dst, vget_low_u8(q0u8));
+ vst1q_u8(dst, d0);
+ vst1q_u8(dst + 16, d1);
dst += dst_stride;
- vst1_u8(dst, vget_high_u8(q0u8));
+ vst1q_u8(dst, d2);
+ vst1q_u8(dst + 16, d3);
dst += dst_stride;
- }
- } else { // avg4
- for (; h > 0; h -= 2) {
- d0u32 = vld1_lane_u32((const uint32_t *)src, d0u32, 0);
+ h -= 2;
+ } while (h > 0);
+ } else { // avg64
+ uint8x16_t s0, s1, s2, s3, d0, d1, d2, d3;
+ do {
+ s0 = vld1q_u8(src);
+ s1 = vld1q_u8(src + 16);
+ s2 = vld1q_u8(src + 32);
+ s3 = vld1q_u8(src + 48);
src += src_stride;
- d0u32 = vld1_lane_u32((const uint32_t *)src, d0u32, 1);
- src += src_stride;
- d2u32 = vld1_lane_u32((const uint32_t *)d, d2u32, 0);
- d += dst_stride;
- d2u32 = vld1_lane_u32((const uint32_t *)d, d2u32, 1);
- d += dst_stride;
+ d0 = vld1q_u8(dst);
+ d1 = vld1q_u8(dst + 16);
+ d2 = vld1q_u8(dst + 32);
+ d3 = vld1q_u8(dst + 48);
- d0u8 = vrhadd_u8(vreinterpret_u8_u32(d0u32), vreinterpret_u8_u32(d2u32));
+ d0 = vrhaddq_u8(s0, d0);
+ d1 = vrhaddq_u8(s1, d1);
+ d2 = vrhaddq_u8(s2, d2);
+ d3 = vrhaddq_u8(s3, d3);
- d0u32 = vreinterpret_u32_u8(d0u8);
- vst1_lane_u32((uint32_t *)dst, d0u32, 0);
- dst += dst_stride;
- vst1_lane_u32((uint32_t *)dst, d0u32, 1);
+ vst1q_u8(dst, d0);
+ vst1q_u8(dst + 16, d1);
+ vst1q_u8(dst + 32, d2);
+ vst1q_u8(dst + 48, d3);
dst += dst_stride;
- }
+ } while (--h);
}
- return;
}
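
All branches of the rewritten vpx_convolve_avg_neon() reduce to the same
per-byte rounding average: vrhadd_u8/vrhaddq_u8 compute (a + b + 1) >> 1
without intermediate overflow. The dispatch on w (4, 8, 16, 32, 64) only
changes how many bytes are loaded per row and whether two rows are paired
per iteration. A one-row scalar equivalent (illustrative only):

static void avg_row_ref(const uint8_t *src, uint8_t *dst, int w) {
  int x;
  for (x = 0; x < w; ++x) dst[x] = (uint8_t)((src[x] + dst[x] + 1) >> 1);
}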
diff --git a/vpx_dsp/arm/vpx_convolve_copy_neon.c b/vpx_dsp/arm/vpx_convolve_copy_neon.c
index fec189e0e..a8f690acd 100644
--- a/vpx_dsp/arm/vpx_convolve_copy_neon.c
+++ b/vpx_dsp/arm/vpx_convolve_copy_neon.c
@@ -13,80 +13,86 @@
#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
-void vpx_convolve_copy_neon(const uint8_t *src, // r0
- ptrdiff_t src_stride, // r1
- uint8_t *dst, // r2
- ptrdiff_t dst_stride, // r3
+void vpx_convolve_copy_neon(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int filter_x_stride,
const int16_t *filter_y, int filter_y_stride, int w,
int h) {
- uint8x8_t d0u8, d2u8;
- uint8x16_t q0u8, q1u8, q2u8, q3u8;
(void)filter_x;
(void)filter_x_stride;
(void)filter_y;
(void)filter_y_stride;
- if (w > 32) { // copy64
- for (; h > 0; h--) {
- q0u8 = vld1q_u8(src);
- q1u8 = vld1q_u8(src + 16);
- q2u8 = vld1q_u8(src + 32);
- q3u8 = vld1q_u8(src + 48);
+ if (w < 8) { // copy4
+ do {
+ *(uint32_t *)dst = *(const uint32_t *)src;
src += src_stride;
-
- vst1q_u8(dst, q0u8);
- vst1q_u8(dst + 16, q1u8);
- vst1q_u8(dst + 32, q2u8);
- vst1q_u8(dst + 48, q3u8);
dst += dst_stride;
- }
- } else if (w == 32) { // copy32
- for (; h > 0; h -= 2) {
- q0u8 = vld1q_u8(src);
- q1u8 = vld1q_u8(src + 16);
+ *(uint32_t *)dst = *(const uint32_t *)src;
src += src_stride;
- q2u8 = vld1q_u8(src);
- q3u8 = vld1q_u8(src + 16);
+ dst += dst_stride;
+ h -= 2;
+ } while (h > 0);
+ } else if (w == 8) { // copy8
+ uint8x8_t s0, s1;
+ do {
+ s0 = vld1_u8(src);
+ src += src_stride;
+ s1 = vld1_u8(src);
src += src_stride;
- vst1q_u8(dst, q0u8);
- vst1q_u8(dst + 16, q1u8);
+ vst1_u8(dst, s0);
dst += dst_stride;
- vst1q_u8(dst, q2u8);
- vst1q_u8(dst + 16, q3u8);
+ vst1_u8(dst, s1);
dst += dst_stride;
- }
- } else if (w > 8) { // copy16
- for (; h > 0; h -= 2) {
- q0u8 = vld1q_u8(src);
+ h -= 2;
+ } while (h > 0);
+ } else if (w < 32) { // copy16
+ uint8x16_t s0, s1;
+ do {
+ s0 = vld1q_u8(src);
src += src_stride;
- q1u8 = vld1q_u8(src);
+ s1 = vld1q_u8(src);
src += src_stride;
- vst1q_u8(dst, q0u8);
+ vst1q_u8(dst, s0);
dst += dst_stride;
- vst1q_u8(dst, q1u8);
+ vst1q_u8(dst, s1);
dst += dst_stride;
- }
- } else if (w == 8) { // copy8
- for (; h > 0; h -= 2) {
- d0u8 = vld1_u8(src);
+ h -= 2;
+ } while (h > 0);
+ } else if (w == 32) { // copy32
+ uint8x16_t s0, s1, s2, s3;
+ do {
+ s0 = vld1q_u8(src);
+ s1 = vld1q_u8(src + 16);
src += src_stride;
- d2u8 = vld1_u8(src);
+ s2 = vld1q_u8(src);
+ s3 = vld1q_u8(src + 16);
src += src_stride;
- vst1_u8(dst, d0u8);
+ vst1q_u8(dst, s0);
+ vst1q_u8(dst + 16, s1);
dst += dst_stride;
- vst1_u8(dst, d2u8);
+ vst1q_u8(dst, s2);
+ vst1q_u8(dst + 16, s3);
dst += dst_stride;
- }
- } else { // copy4
- for (; h > 0; h--) {
- *(uint32_t *)dst = *(const uint32_t *)src;
+ h -= 2;
+ } while (h > 0);
+ } else { // copy64
+ uint8x16_t s0, s1, s2, s3;
+ do {
+ s0 = vld1q_u8(src);
+ s1 = vld1q_u8(src + 16);
+ s2 = vld1q_u8(src + 32);
+ s3 = vld1q_u8(src + 48);
src += src_stride;
+
+ vst1q_u8(dst, s0);
+ vst1q_u8(dst + 16, s1);
+ vst1q_u8(dst + 32, s2);
+ vst1q_u8(dst + 48, s3);
dst += dst_stride;
- }
+ } while (--h);
}
- return;
}
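
The copy4 branch keeps the 32-bit type-punned load/store
(*(uint32_t *)dst = *(const uint32_t *)src), which relies on 4-wide rows
being 4-byte aligned, as the codec's block layout guarantees. A strictly
portable variant (an assumption, not taken from the source) would be:

#include <string.h>

static void copy4_row(const uint8_t *src, uint8_t *dst) {
  memcpy(dst, src, 4); /* lowers to one 32-bit move when alignment is known */
}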
diff --git a/vpx_dsp/arm/vpx_convolve_neon.c b/vpx_dsp/arm/vpx_convolve_neon.c
index c2d5895b7..5d7fa54fc 100644
--- a/vpx_dsp/arm/vpx_convolve_neon.c
+++ b/vpx_dsp/arm/vpx_convolve_neon.c
@@ -34,13 +34,13 @@ void vpx_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
 * the temp buffer, which has lots of extra room and is subsequently
 * discarded; this is safe, if somewhat less than ideal.
*/
- vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x,
+ vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, w, filter_x,
x_step_q4, filter_y, y_step_q4, w,
intermediate_height);
/* Step into the temp buffer 3 lines to get the actual frame data */
- vpx_convolve8_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x,
- x_step_q4, filter_y, y_step_q4, w, h);
+ vpx_convolve8_vert_neon(temp + w * 3, w, dst, dst_stride, filter_x, x_step_q4,
+ filter_y, y_step_q4, w, h);
}
void vpx_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride,
@@ -57,9 +57,9 @@ void vpx_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride,
/* This implementation has the same issues as above. In addition, we only want
* to average the values after both passes.
*/
- vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x,
+ vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, w, filter_x,
x_step_q4, filter_y, y_step_q4, w,
intermediate_height);
- vpx_convolve8_avg_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x,
+ vpx_convolve8_avg_vert_neon(temp + w * 3, w, dst, dst_stride, filter_x,
x_step_q4, filter_y, y_step_q4, w, h);
}