author     George Steed <george.steed@arm.com>  2023-03-01 22:44:38 +0000
committer  George Steed <george.steed@arm.com>  2023-03-06 13:34:35 +0000
commit     62827575462ecdb7790b60f5da302b6395cef798 (patch)
tree       31f30acc3ad235e239c2c59aab980802501dbbb1
parent     33f3ae34144ea42bbf97d812ef23dccfc4bb8662 (diff)
Implement highbd_d63_predictor using Neon
Add Neon implementations of the highbd d63 predictor for 4x4, 8x8, 16x16 and 32x32 block sizes. Also update tests to add new corresponding cases.

This re-lands commit 7cdf139e3d6237386e0f93bdb0bdc1b459c663bf, previously reverted in 7478b7e4e481562a4a13f233acb66a60462e1934.

Compared to the previous implementation attempt, we now correctly match the behaviour of the C code when handling the final element loaded from the 'above' input array. In particular:

- The C code for a 4x4 block performs a full average of the last element rather than duplicating the final element from the input 'above' array.

- The C code for the other block sizes performs a full average for the stride=0 and stride=1 rows, and otherwise shifts in duplicates of the final element from the input 'above' array. Notably, this shifting for later strides _replaces_ the final element that we previously performed an average on (see {d0,d1}_ext in the code).

It is worth noting that this difference is not caught by the existing VP9HighbdIntraPredTest test cases, since the test vector initialisation contains this loop:

  for (int x = block_size; x < 2 * block_size; x++) {
    above_row_[x] = above_row_[block_size - 1];
  }

Since AVG2(a, a) and AVG3(a, a, a) are simply 'a', such differences in behaviour for the final element are not observed.

Tested on AArch64 with:
- ./test_libvpx --gtest_filter="*VP9HighbdIntraPredTest*"
- ./test_libvpx --gtest_filter="*VP9/TestVectorTest.MD5Match*"
- ./test_libvpx --gtest_filter="*VP9/ExternalFrameBufferMD5Test*"

Speedups over the C code (higher is better):

Microarch.  | Compiler | Block | Speedup
Neoverse N1 | LLVM 15  |   4x4 |    2.43
Neoverse N1 | LLVM 15  |   8x8 |    3.92
Neoverse N1 | LLVM 15  | 16x16 |    3.19
Neoverse N1 | LLVM 15  | 32x32 |    4.13
Neoverse N1 | GCC 12   |   4x4 |    2.92
Neoverse N1 | GCC 12   |   8x8 |    6.51
Neoverse N1 | GCC 12   | 16x16 |    4.55
Neoverse N1 | GCC 12   | 32x32 |    3.18
Neoverse V1 | LLVM 15  |   4x4 |    1.99
Neoverse V1 | LLVM 15  |   8x8 |    3.65
Neoverse V1 | LLVM 15  | 16x16 |    3.72
Neoverse V1 | LLVM 15  | 32x32 |    3.26
Neoverse V1 | GCC 12   |   4x4 |    2.39
Neoverse V1 | GCC 12   |   8x8 |    4.76
Neoverse V1 | GCC 12   | 16x16 |    3.24
Neoverse V1 | GCC 12   | 32x32 |    2.44

Change-Id: Iefaa774d6a20388b523eaa7f5df6bc5f5cf249e4
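To make the masking effect concrete, the following is a minimal standalone C sketch (not part of the libvpx sources; the AVG2/AVG3 macros are written out here with what is assumed to be the usual libvpx-style rounding, and the sample values are made up). With the test-harness padding quoted above, "average the final element" and "duplicate the final element" coincide, but for arbitrary data past the block they diverge:

  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative rounding averages (assumed to mirror the libvpx AVG2/AVG3
   * definitions). */
  #define AVG2(a, b) (((a) + (b) + 1) >> 1)
  #define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)

  int main(void) {
    enum { kBlockSize = 8 };
    uint16_t above[2 * kBlockSize] = { 10, 20, 30, 40, 50, 60, 70, 80 };
    int x;

    /* Test-vector padding as in the loop quoted above: everything past the
     * block is a copy of the final in-block element. */
    for (x = kBlockSize; x < 2 * kBlockSize; ++x) {
      above[x] = above[kBlockSize - 1];
    }

    /* Final element of the stride=1 row: full AVG3 vs. duplicating above[7].
     * With padded input both give 80, so the test cannot tell them apart. */
    printf("padded:   avg3=%d dup=%d\n",
           AVG3(above[7], above[8], above[9]), above[7]);

    /* With an arbitrary value past the block the two strategies diverge
     * (140 vs. 80); this is the case the re-landed Neon code now handles in
     * the same way as the C code. */
    above[8] = 200;
    printf("unpadded: avg3=%d dup=%d\n",
           AVG3(above[7], above[8], above[9]), above[7]);
    return 0;
  }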
-rw-r--r--  test/test_intra_pred_speed.cc        |  12
-rw-r--r--  test/vp9_intrapred_test.cc           |  24
-rw-r--r--  vpx_dsp/arm/highbd_intrapred_neon.c  | 326
-rw-r--r--  vpx_dsp/vpx_dsp_rtcd_defs.pl         |   8
4 files changed, 362 insertions, 8 deletions
diff --git a/test/test_intra_pred_speed.cc b/test/test_intra_pred_speed.cc
index 24af471ea..e721a459a 100644
--- a/test/test_intra_pred_speed.cc
+++ b/test/test_intra_pred_speed.cc
@@ -566,14 +566,16 @@ HIGHBD_INTRA_PRED_TEST(
vpx_highbd_dc_128_predictor_4x4_neon, vpx_highbd_v_predictor_4x4_neon,
vpx_highbd_h_predictor_4x4_neon, vpx_highbd_d45_predictor_4x4_neon,
vpx_highbd_d135_predictor_4x4_neon, vpx_highbd_d117_predictor_4x4_neon,
- nullptr, nullptr, nullptr, vpx_highbd_tm_predictor_4x4_neon)
+ nullptr, nullptr, vpx_highbd_d63_predictor_4x4_neon,
+ vpx_highbd_tm_predictor_4x4_neon)
HIGHBD_INTRA_PRED_TEST(
NEON, TestHighbdIntraPred8, vpx_highbd_dc_predictor_8x8_neon,
vpx_highbd_dc_left_predictor_8x8_neon, vpx_highbd_dc_top_predictor_8x8_neon,
vpx_highbd_dc_128_predictor_8x8_neon, vpx_highbd_v_predictor_8x8_neon,
vpx_highbd_h_predictor_8x8_neon, vpx_highbd_d45_predictor_8x8_neon,
vpx_highbd_d135_predictor_8x8_neon, vpx_highbd_d117_predictor_8x8_neon,
- nullptr, nullptr, nullptr, vpx_highbd_tm_predictor_8x8_neon)
+ nullptr, nullptr, vpx_highbd_d63_predictor_8x8_neon,
+ vpx_highbd_tm_predictor_8x8_neon)
HIGHBD_INTRA_PRED_TEST(
NEON, TestHighbdIntraPred16, vpx_highbd_dc_predictor_16x16_neon,
vpx_highbd_dc_left_predictor_16x16_neon,
@@ -581,7 +583,8 @@ HIGHBD_INTRA_PRED_TEST(
vpx_highbd_dc_128_predictor_16x16_neon, vpx_highbd_v_predictor_16x16_neon,
vpx_highbd_h_predictor_16x16_neon, vpx_highbd_d45_predictor_16x16_neon,
vpx_highbd_d135_predictor_16x16_neon, vpx_highbd_d117_predictor_16x16_neon,
- nullptr, nullptr, nullptr, vpx_highbd_tm_predictor_16x16_neon)
+ nullptr, nullptr, vpx_highbd_d63_predictor_16x16_neon,
+ vpx_highbd_tm_predictor_16x16_neon)
HIGHBD_INTRA_PRED_TEST(
NEON, TestHighbdIntraPred32, vpx_highbd_dc_predictor_32x32_neon,
vpx_highbd_dc_left_predictor_32x32_neon,
@@ -589,7 +592,8 @@ HIGHBD_INTRA_PRED_TEST(
vpx_highbd_dc_128_predictor_32x32_neon, vpx_highbd_v_predictor_32x32_neon,
vpx_highbd_h_predictor_32x32_neon, vpx_highbd_d45_predictor_32x32_neon,
vpx_highbd_d135_predictor_32x32_neon, vpx_highbd_d117_predictor_32x32_neon,
- nullptr, nullptr, nullptr, vpx_highbd_tm_predictor_32x32_neon)
+ nullptr, nullptr, vpx_highbd_d63_predictor_32x32_neon,
+ vpx_highbd_tm_predictor_32x32_neon)
#endif // HAVE_NEON
#endif // CONFIG_VP9_HIGHBITDEPTH
diff --git a/test/vp9_intrapred_test.cc b/test/vp9_intrapred_test.cc
index 83e371df6..c4e0e78ac 100644
--- a/test/vp9_intrapred_test.cc
+++ b/test/vp9_intrapred_test.cc
@@ -848,6 +848,14 @@ INSTANTIATE_TEST_SUITE_P(
&vpx_highbd_d45_predictor_16x16_c, 16, 8),
HighbdIntraPredParam(&vpx_highbd_d45_predictor_32x32_neon,
&vpx_highbd_d45_predictor_32x32_c, 32, 8),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_4x4_neon,
+ &vpx_highbd_d63_predictor_4x4_c, 4, 8),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_8x8_neon,
+ &vpx_highbd_d63_predictor_8x8_c, 8, 8),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_16x16_neon,
+ &vpx_highbd_d63_predictor_16x16_c, 16, 8),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_32x32_neon,
+ &vpx_highbd_d63_predictor_32x32_c, 32, 8),
HighbdIntraPredParam(&vpx_highbd_d117_predictor_4x4_neon,
&vpx_highbd_d117_predictor_4x4_c, 4, 8),
HighbdIntraPredParam(&vpx_highbd_d117_predictor_8x8_neon,
@@ -932,6 +940,14 @@ INSTANTIATE_TEST_SUITE_P(
&vpx_highbd_d45_predictor_16x16_c, 16, 10),
HighbdIntraPredParam(&vpx_highbd_d45_predictor_32x32_neon,
&vpx_highbd_d45_predictor_32x32_c, 32, 10),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_4x4_neon,
+ &vpx_highbd_d63_predictor_4x4_c, 4, 10),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_8x8_neon,
+ &vpx_highbd_d63_predictor_8x8_c, 8, 10),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_16x16_neon,
+ &vpx_highbd_d63_predictor_16x16_c, 16, 10),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_32x32_neon,
+ &vpx_highbd_d63_predictor_32x32_c, 32, 10),
HighbdIntraPredParam(&vpx_highbd_d117_predictor_4x4_neon,
&vpx_highbd_d117_predictor_4x4_c, 4, 10),
HighbdIntraPredParam(&vpx_highbd_d117_predictor_8x8_neon,
@@ -1016,6 +1032,14 @@ INSTANTIATE_TEST_SUITE_P(
&vpx_highbd_d45_predictor_16x16_c, 16, 12),
HighbdIntraPredParam(&vpx_highbd_d45_predictor_32x32_neon,
&vpx_highbd_d45_predictor_32x32_c, 32, 12),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_4x4_neon,
+ &vpx_highbd_d63_predictor_4x4_c, 4, 12),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_8x8_neon,
+ &vpx_highbd_d63_predictor_8x8_c, 8, 12),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_16x16_neon,
+ &vpx_highbd_d63_predictor_16x16_c, 16, 12),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_32x32_neon,
+ &vpx_highbd_d63_predictor_32x32_c, 32, 12),
HighbdIntraPredParam(&vpx_highbd_d117_predictor_4x4_neon,
&vpx_highbd_d117_predictor_4x4_c, 4, 10),
HighbdIntraPredParam(&vpx_highbd_d117_predictor_8x8_neon,
diff --git a/vpx_dsp/arm/highbd_intrapred_neon.c b/vpx_dsp/arm/highbd_intrapred_neon.c
index dc1b27dc1..6b6ad95c1 100644
--- a/vpx_dsp/arm/highbd_intrapred_neon.c
+++ b/vpx_dsp/arm/highbd_intrapred_neon.c
@@ -453,6 +453,332 @@ void vpx_highbd_d45_predictor_32x32_neon(uint16_t *dst, ptrdiff_t stride,
// -----------------------------------------------------------------------------
+void vpx_highbd_d63_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
+ uint16x4_t a0, a1, a2, a3, d0, d1, d2, d3;
+ (void)left;
+ (void)bd;
+
+ a0 = vld1_u16(above + 0);
+ a1 = vld1_u16(above + 1);
+ a2 = vld1_u16(above + 2);
+ a3 = vld1_u16(above + 3);
+
+ d0 = vrhadd_u16(a0, a1);
+ d1 = vrhadd_u16(vhadd_u16(a0, a2), a1);
+ d2 = vrhadd_u16(a1, a2);
+ d3 = vrhadd_u16(vhadd_u16(a1, a3), a2);
+
+ // Note that here we are performing a full avg calculation for the final
+ // elements rather than storing a duplicate of above[3], which differs
+ // (correctly) from the general scheme employed by the bs={8,16,32}
+ // implementations in order to match the original C implementation.
+ vst1_u16(dst + 0 * stride, d0);
+ vst1_u16(dst + 1 * stride, d1);
+ vst1_u16(dst + 2 * stride, d2);
+ vst1_u16(dst + 3 * stride, d3);
+}
+
+void vpx_highbd_d63_predictor_8x8_neon(uint16_t *dst, ptrdiff_t stride,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
+ uint16x8_t a0, a1, a2, a7, d0, d1, d0_ext, d1_ext;
+ (void)left;
+ (void)bd;
+
+ a0 = vld1q_u16(above + 0);
+ a1 = vld1q_u16(above + 1);
+ a2 = vld1q_u16(above + 2);
+ a7 = vld1q_dup_u16(above + 7);
+
+ d0 = vrhaddq_u16(a0, a1);
+ d1 = vrhaddq_u16(vhaddq_u16(a0, a2), a1);
+
+ // We want to store:
+ // stride=0 [ d0[0], d0[1], d0[2], d0[3], d0[4], d0[5], d0[6], d0[7] ]
+ // stride=1 [ d1[0], d1[1], d1[2], d1[3], d1[4], d1[5], d1[6], d1[7] ]
+ // stride=2 [ d0[1], d0[2], d0[3], d0[4], d0[5], d0[6], a[7], a[7] ]
+ // stride=3 [ d1[1], d1[2], d1[3], d1[4], d1[5], d1[6], a[7], a[7] ]
+ // stride=4 [ d0[2], d0[3], d0[4], d0[5], d0[6], a[7], a[7], a[7] ]
+ // stride=5 [ d1[2], d1[3], d1[4], d1[5], d1[6], a[7], a[7], a[7] ]
+ // stride=6 [ d0[3], d0[4], d0[5], d0[6], a[7], a[7], a[7], a[7] ]
+ // stride=7 [ d1[3], d1[4], d1[5], d1[6], a[7], a[7], a[7], a[7] ]
+ // Note in particular that d0[7] and d1[7] are only ever referenced in the
+ // stride=0 and stride=1 cases respectively, and in later strides are
+  // replaced by a copy of above[7]. These are equivalent if, for i > 7,
+  // above[i] == above[7]; however, that is not always the case.
+
+  // Strip out d0[7] and d1[7] so that we can replace them with an additional
+  // copy of above[7]; the first vector here doesn't matter, so just reuse
+  // d0/d1.
+ d0_ext = vextq_u16(d0, d0, 7);
+ d1_ext = vextq_u16(d1, d1, 7);
+
+ // Shuffle in duplicates of above[7] and store.
+ vst1q_u16(dst + 0 * stride, d0);
+ vst1q_u16(dst + 1 * stride, d1);
+ vst1q_u16(dst + 2 * stride, vextq_u16(d0_ext, a7, 2));
+ vst1q_u16(dst + 3 * stride, vextq_u16(d1_ext, a7, 2));
+ vst1q_u16(dst + 4 * stride, vextq_u16(d0_ext, a7, 3));
+ vst1q_u16(dst + 5 * stride, vextq_u16(d1_ext, a7, 3));
+ vst1q_u16(dst + 6 * stride, vextq_u16(d0_ext, a7, 4));
+ vst1q_u16(dst + 7 * stride, vextq_u16(d1_ext, a7, 4));
+}
+
+void vpx_highbd_d63_predictor_16x16_neon(uint16_t *dst, ptrdiff_t stride,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
+ // See vpx_highbd_d63_predictor_8x8_neon for details on the implementation.
+ uint16x8_t a0, a1, a2, a8, a9, a10, a15, d0[2], d1[2], d0_ext, d1_ext;
+ (void)left;
+ (void)bd;
+
+ a0 = vld1q_u16(above + 0);
+ a1 = vld1q_u16(above + 1);
+ a2 = vld1q_u16(above + 2);
+ a8 = vld1q_u16(above + 8);
+ a9 = vld1q_u16(above + 9);
+ a10 = vld1q_u16(above + 10);
+ a15 = vld1q_dup_u16(above + 15);
+
+ d0[0] = vrhaddq_u16(a0, a1);
+ d0[1] = vrhaddq_u16(a8, a9);
+ d1[0] = vrhaddq_u16(vhaddq_u16(a0, a2), a1);
+ d1[1] = vrhaddq_u16(vhaddq_u16(a8, a10), a9);
+
+  // Strip out the final element of d0/d1 so that we can replace it with an
+  // additional copy of above[15]; the first vector here doesn't matter, so
+  // just reuse the same vector.
+ d0_ext = vextq_u16(d0[1], d0[1], 7);
+ d1_ext = vextq_u16(d1[1], d1[1], 7);
+
+  // Shuffle in duplicates of above[15] and store. Note that cases involving
+  // {d0,d1}_ext require an extra shift to undo the shifting out of the final
+  // element from above.
+ vst1q_u16(dst + 0 * stride + 0, d0[0]);
+ vst1q_u16(dst + 0 * stride + 8, d0[1]);
+ vst1q_u16(dst + 1 * stride + 0, d1[0]);
+ vst1q_u16(dst + 1 * stride + 8, d1[1]);
+ vst1q_u16(dst + 2 * stride + 0, vextq_u16(d0[0], d0[1], 1));
+ vst1q_u16(dst + 2 * stride + 8, vextq_u16(d0_ext, a15, 2));
+ vst1q_u16(dst + 3 * stride + 0, vextq_u16(d1[0], d1[1], 1));
+ vst1q_u16(dst + 3 * stride + 8, vextq_u16(d1_ext, a15, 2));
+ vst1q_u16(dst + 4 * stride + 0, vextq_u16(d0[0], d0[1], 2));
+ vst1q_u16(dst + 4 * stride + 8, vextq_u16(d0_ext, a15, 3));
+ vst1q_u16(dst + 5 * stride + 0, vextq_u16(d1[0], d1[1], 2));
+ vst1q_u16(dst + 5 * stride + 8, vextq_u16(d1_ext, a15, 3));
+ vst1q_u16(dst + 6 * stride + 0, vextq_u16(d0[0], d0[1], 3));
+ vst1q_u16(dst + 6 * stride + 8, vextq_u16(d0_ext, a15, 4));
+ vst1q_u16(dst + 7 * stride + 0, vextq_u16(d1[0], d1[1], 3));
+ vst1q_u16(dst + 7 * stride + 8, vextq_u16(d1_ext, a15, 4));
+ vst1q_u16(dst + 8 * stride + 0, vextq_u16(d0[0], d0[1], 4));
+ vst1q_u16(dst + 8 * stride + 8, vextq_u16(d0_ext, a15, 5));
+ vst1q_u16(dst + 9 * stride + 0, vextq_u16(d1[0], d1[1], 4));
+ vst1q_u16(dst + 9 * stride + 8, vextq_u16(d1_ext, a15, 5));
+ vst1q_u16(dst + 10 * stride + 0, vextq_u16(d0[0], d0[1], 5));
+ vst1q_u16(dst + 10 * stride + 8, vextq_u16(d0_ext, a15, 6));
+ vst1q_u16(dst + 11 * stride + 0, vextq_u16(d1[0], d1[1], 5));
+ vst1q_u16(dst + 11 * stride + 8, vextq_u16(d1_ext, a15, 6));
+ vst1q_u16(dst + 12 * stride + 0, vextq_u16(d0[0], d0[1], 6));
+ vst1q_u16(dst + 12 * stride + 8, vextq_u16(d0_ext, a15, 7));
+ vst1q_u16(dst + 13 * stride + 0, vextq_u16(d1[0], d1[1], 6));
+ vst1q_u16(dst + 13 * stride + 8, vextq_u16(d1_ext, a15, 7));
+ vst1q_u16(dst + 14 * stride + 0, vextq_u16(d0[0], d0[1], 7));
+ vst1q_u16(dst + 14 * stride + 8, a15);
+ vst1q_u16(dst + 15 * stride + 0, vextq_u16(d1[0], d1[1], 7));
+ vst1q_u16(dst + 15 * stride + 8, a15);
+}
+
+void vpx_highbd_d63_predictor_32x32_neon(uint16_t *dst, ptrdiff_t stride,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
+ // See vpx_highbd_d63_predictor_8x8_neon for details on the implementation.
+ uint16x8_t a0, a1, a2, a8, a9, a10, a16, a17, a18, a24, a25, a26, a31, d0[4],
+ d1[4], d0_ext, d1_ext;
+ (void)left;
+ (void)bd;
+
+ a0 = vld1q_u16(above + 0);
+ a1 = vld1q_u16(above + 1);
+ a2 = vld1q_u16(above + 2);
+ a8 = vld1q_u16(above + 8);
+ a9 = vld1q_u16(above + 9);
+ a10 = vld1q_u16(above + 10);
+ a16 = vld1q_u16(above + 16);
+ a17 = vld1q_u16(above + 17);
+ a18 = vld1q_u16(above + 18);
+ a24 = vld1q_u16(above + 24);
+ a25 = vld1q_u16(above + 25);
+ a26 = vld1q_u16(above + 26);
+ a31 = vld1q_dup_u16(above + 31);
+
+ d0[0] = vrhaddq_u16(a0, a1);
+ d0[1] = vrhaddq_u16(a8, a9);
+ d0[2] = vrhaddq_u16(a16, a17);
+ d0[3] = vrhaddq_u16(a24, a25);
+ d1[0] = vrhaddq_u16(vhaddq_u16(a0, a2), a1);
+ d1[1] = vrhaddq_u16(vhaddq_u16(a8, a10), a9);
+ d1[2] = vrhaddq_u16(vhaddq_u16(a16, a18), a17);
+ d1[3] = vrhaddq_u16(vhaddq_u16(a24, a26), a25);
+
+  // Strip out the final element of d0/d1 so that we can replace it with an
+  // additional copy of above[31]; the first vector here doesn't matter, so
+  // just reuse the same vector.
+ d0_ext = vextq_u16(d0[3], d0[3], 7);
+ d1_ext = vextq_u16(d1[3], d1[3], 7);
+
+  // Shuffle in duplicates of above[31] and store. Note that cases involving
+  // {d0,d1}_ext require an extra shift to undo the shifting out of the final
+  // element from above.
+
+ vst1q_u16(dst + 0 * stride + 0, d0[0]);
+ vst1q_u16(dst + 0 * stride + 8, d0[1]);
+ vst1q_u16(dst + 0 * stride + 16, d0[2]);
+ vst1q_u16(dst + 0 * stride + 24, d0[3]);
+ vst1q_u16(dst + 1 * stride + 0, d1[0]);
+ vst1q_u16(dst + 1 * stride + 8, d1[1]);
+ vst1q_u16(dst + 1 * stride + 16, d1[2]);
+ vst1q_u16(dst + 1 * stride + 24, d1[3]);
+
+ vst1q_u16(dst + 2 * stride + 0, vextq_u16(d0[0], d0[1], 1));
+ vst1q_u16(dst + 2 * stride + 8, vextq_u16(d0[1], d0[2], 1));
+ vst1q_u16(dst + 2 * stride + 16, vextq_u16(d0[2], d0[3], 1));
+ vst1q_u16(dst + 2 * stride + 24, vextq_u16(d0_ext, a31, 2));
+ vst1q_u16(dst + 3 * stride + 0, vextq_u16(d1[0], d1[1], 1));
+ vst1q_u16(dst + 3 * stride + 8, vextq_u16(d1[1], d1[2], 1));
+ vst1q_u16(dst + 3 * stride + 16, vextq_u16(d1[2], d1[3], 1));
+ vst1q_u16(dst + 3 * stride + 24, vextq_u16(d1_ext, a31, 2));
+
+ vst1q_u16(dst + 4 * stride + 0, vextq_u16(d0[0], d0[1], 2));
+ vst1q_u16(dst + 4 * stride + 8, vextq_u16(d0[1], d0[2], 2));
+ vst1q_u16(dst + 4 * stride + 16, vextq_u16(d0[2], d0[3], 2));
+ vst1q_u16(dst + 4 * stride + 24, vextq_u16(d0_ext, a31, 3));
+ vst1q_u16(dst + 5 * stride + 0, vextq_u16(d1[0], d1[1], 2));
+ vst1q_u16(dst + 5 * stride + 8, vextq_u16(d1[1], d1[2], 2));
+ vst1q_u16(dst + 5 * stride + 16, vextq_u16(d1[2], d1[3], 2));
+ vst1q_u16(dst + 5 * stride + 24, vextq_u16(d1_ext, a31, 3));
+
+ vst1q_u16(dst + 6 * stride + 0, vextq_u16(d0[0], d0[1], 3));
+ vst1q_u16(dst + 6 * stride + 8, vextq_u16(d0[1], d0[2], 3));
+ vst1q_u16(dst + 6 * stride + 16, vextq_u16(d0[2], d0[3], 3));
+ vst1q_u16(dst + 6 * stride + 24, vextq_u16(d0_ext, a31, 4));
+ vst1q_u16(dst + 7 * stride + 0, vextq_u16(d1[0], d1[1], 3));
+ vst1q_u16(dst + 7 * stride + 8, vextq_u16(d1[1], d1[2], 3));
+ vst1q_u16(dst + 7 * stride + 16, vextq_u16(d1[2], d1[3], 3));
+ vst1q_u16(dst + 7 * stride + 24, vextq_u16(d1_ext, a31, 4));
+
+ vst1q_u16(dst + 8 * stride + 0, vextq_u16(d0[0], d0[1], 4));
+ vst1q_u16(dst + 8 * stride + 8, vextq_u16(d0[1], d0[2], 4));
+ vst1q_u16(dst + 8 * stride + 16, vextq_u16(d0[2], d0[3], 4));
+ vst1q_u16(dst + 8 * stride + 24, vextq_u16(d0_ext, a31, 5));
+ vst1q_u16(dst + 9 * stride + 0, vextq_u16(d1[0], d1[1], 4));
+ vst1q_u16(dst + 9 * stride + 8, vextq_u16(d1[1], d1[2], 4));
+ vst1q_u16(dst + 9 * stride + 16, vextq_u16(d1[2], d1[3], 4));
+ vst1q_u16(dst + 9 * stride + 24, vextq_u16(d1_ext, a31, 5));
+
+ vst1q_u16(dst + 10 * stride + 0, vextq_u16(d0[0], d0[1], 5));
+ vst1q_u16(dst + 10 * stride + 8, vextq_u16(d0[1], d0[2], 5));
+ vst1q_u16(dst + 10 * stride + 16, vextq_u16(d0[2], d0[3], 5));
+ vst1q_u16(dst + 10 * stride + 24, vextq_u16(d0_ext, a31, 6));
+ vst1q_u16(dst + 11 * stride + 0, vextq_u16(d1[0], d1[1], 5));
+ vst1q_u16(dst + 11 * stride + 8, vextq_u16(d1[1], d1[2], 5));
+ vst1q_u16(dst + 11 * stride + 16, vextq_u16(d1[2], d1[3], 5));
+ vst1q_u16(dst + 11 * stride + 24, vextq_u16(d1_ext, a31, 6));
+
+ vst1q_u16(dst + 12 * stride + 0, vextq_u16(d0[0], d0[1], 6));
+ vst1q_u16(dst + 12 * stride + 8, vextq_u16(d0[1], d0[2], 6));
+ vst1q_u16(dst + 12 * stride + 16, vextq_u16(d0[2], d0[3], 6));
+ vst1q_u16(dst + 12 * stride + 24, vextq_u16(d0_ext, a31, 7));
+ vst1q_u16(dst + 13 * stride + 0, vextq_u16(d1[0], d1[1], 6));
+ vst1q_u16(dst + 13 * stride + 8, vextq_u16(d1[1], d1[2], 6));
+ vst1q_u16(dst + 13 * stride + 16, vextq_u16(d1[2], d1[3], 6));
+ vst1q_u16(dst + 13 * stride + 24, vextq_u16(d1_ext, a31, 7));
+
+ vst1q_u16(dst + 14 * stride + 0, vextq_u16(d0[0], d0[1], 7));
+ vst1q_u16(dst + 14 * stride + 8, vextq_u16(d0[1], d0[2], 7));
+ vst1q_u16(dst + 14 * stride + 16, vextq_u16(d0[2], d0[3], 7));
+ vst1q_u16(dst + 14 * stride + 24, a31);
+ vst1q_u16(dst + 15 * stride + 0, vextq_u16(d1[0], d1[1], 7));
+ vst1q_u16(dst + 15 * stride + 8, vextq_u16(d1[1], d1[2], 7));
+ vst1q_u16(dst + 15 * stride + 16, vextq_u16(d1[2], d1[3], 7));
+ vst1q_u16(dst + 15 * stride + 24, a31);
+
+ vst1q_u16(dst + 16 * stride + 0, d0[1]);
+ vst1q_u16(dst + 16 * stride + 8, d0[2]);
+ vst1q_u16(dst + 16 * stride + 16, vextq_u16(d0_ext, a31, 1));
+ vst1q_u16(dst + 16 * stride + 24, a31);
+ vst1q_u16(dst + 17 * stride + 0, d1[1]);
+ vst1q_u16(dst + 17 * stride + 8, d1[2]);
+ vst1q_u16(dst + 17 * stride + 16, vextq_u16(d1_ext, a31, 1));
+ vst1q_u16(dst + 17 * stride + 24, a31);
+
+ vst1q_u16(dst + 18 * stride + 0, vextq_u16(d0[1], d0[2], 1));
+ vst1q_u16(dst + 18 * stride + 8, vextq_u16(d0[2], d0[3], 1));
+ vst1q_u16(dst + 18 * stride + 16, vextq_u16(d0_ext, a31, 2));
+ vst1q_u16(dst + 18 * stride + 24, a31);
+ vst1q_u16(dst + 19 * stride + 0, vextq_u16(d1[1], d1[2], 1));
+ vst1q_u16(dst + 19 * stride + 8, vextq_u16(d1[2], d1[3], 1));
+ vst1q_u16(dst + 19 * stride + 16, vextq_u16(d1_ext, a31, 2));
+ vst1q_u16(dst + 19 * stride + 24, a31);
+
+ vst1q_u16(dst + 20 * stride + 0, vextq_u16(d0[1], d0[2], 2));
+ vst1q_u16(dst + 20 * stride + 8, vextq_u16(d0[2], d0[3], 2));
+ vst1q_u16(dst + 20 * stride + 16, vextq_u16(d0_ext, a31, 3));
+ vst1q_u16(dst + 20 * stride + 24, a31);
+ vst1q_u16(dst + 21 * stride + 0, vextq_u16(d1[1], d1[2], 2));
+ vst1q_u16(dst + 21 * stride + 8, vextq_u16(d1[2], d1[3], 2));
+ vst1q_u16(dst + 21 * stride + 16, vextq_u16(d1_ext, a31, 3));
+ vst1q_u16(dst + 21 * stride + 24, a31);
+
+ vst1q_u16(dst + 22 * stride + 0, vextq_u16(d0[1], d0[2], 3));
+ vst1q_u16(dst + 22 * stride + 8, vextq_u16(d0[2], d0[3], 3));
+ vst1q_u16(dst + 22 * stride + 16, vextq_u16(d0_ext, a31, 4));
+ vst1q_u16(dst + 22 * stride + 24, a31);
+ vst1q_u16(dst + 23 * stride + 0, vextq_u16(d1[1], d1[2], 3));
+ vst1q_u16(dst + 23 * stride + 8, vextq_u16(d1[2], d1[3], 3));
+ vst1q_u16(dst + 23 * stride + 16, vextq_u16(d1_ext, a31, 4));
+ vst1q_u16(dst + 23 * stride + 24, a31);
+
+ vst1q_u16(dst + 24 * stride + 0, vextq_u16(d0[1], d0[2], 4));
+ vst1q_u16(dst + 24 * stride + 8, vextq_u16(d0[2], d0[3], 4));
+ vst1q_u16(dst + 24 * stride + 16, vextq_u16(d0_ext, a31, 5));
+ vst1q_u16(dst + 24 * stride + 24, a31);
+ vst1q_u16(dst + 25 * stride + 0, vextq_u16(d1[1], d1[2], 4));
+ vst1q_u16(dst + 25 * stride + 8, vextq_u16(d1[2], d1[3], 4));
+ vst1q_u16(dst + 25 * stride + 16, vextq_u16(d1_ext, a31, 5));
+ vst1q_u16(dst + 25 * stride + 24, a31);
+
+ vst1q_u16(dst + 26 * stride + 0, vextq_u16(d0[1], d0[2], 5));
+ vst1q_u16(dst + 26 * stride + 8, vextq_u16(d0[2], d0[3], 5));
+ vst1q_u16(dst + 26 * stride + 16, vextq_u16(d0_ext, a31, 6));
+ vst1q_u16(dst + 26 * stride + 24, a31);
+ vst1q_u16(dst + 27 * stride + 0, vextq_u16(d1[1], d1[2], 5));
+ vst1q_u16(dst + 27 * stride + 8, vextq_u16(d1[2], d1[3], 5));
+ vst1q_u16(dst + 27 * stride + 16, vextq_u16(d1_ext, a31, 6));
+ vst1q_u16(dst + 27 * stride + 24, a31);
+
+ vst1q_u16(dst + 28 * stride + 0, vextq_u16(d0[1], d0[2], 6));
+ vst1q_u16(dst + 28 * stride + 8, vextq_u16(d0[2], d0[3], 6));
+ vst1q_u16(dst + 28 * stride + 16, vextq_u16(d0_ext, a31, 7));
+ vst1q_u16(dst + 28 * stride + 24, a31);
+ vst1q_u16(dst + 29 * stride + 0, vextq_u16(d1[1], d1[2], 6));
+ vst1q_u16(dst + 29 * stride + 8, vextq_u16(d1[2], d1[3], 6));
+ vst1q_u16(dst + 29 * stride + 16, vextq_u16(d1_ext, a31, 7));
+ vst1q_u16(dst + 29 * stride + 24, a31);
+
+ vst1q_u16(dst + 30 * stride + 0, vextq_u16(d0[1], d0[2], 7));
+ vst1q_u16(dst + 30 * stride + 8, vextq_u16(d0[2], d0[3], 7));
+ vst1q_u16(dst + 30 * stride + 16, a31);
+ vst1q_u16(dst + 30 * stride + 24, a31);
+ vst1q_u16(dst + 31 * stride + 0, vextq_u16(d1[1], d1[2], 7));
+ vst1q_u16(dst + 31 * stride + 8, vextq_u16(d1[2], d1[3], 7));
+ vst1q_u16(dst + 31 * stride + 16, a31);
+ vst1q_u16(dst + 31 * stride + 24, a31);
+}
+
+// -----------------------------------------------------------------------------
+
void vpx_highbd_d117_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
diff --git a/vpx_dsp/vpx_dsp_rtcd_defs.pl b/vpx_dsp/vpx_dsp_rtcd_defs.pl
index 652c553f9..48552a6f8 100644
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -217,7 +217,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
specialize qw/vpx_highbd_d45_predictor_4x4 neon ssse3/;
add_proto qw/void vpx_highbd_d63_predictor_4x4/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d63_predictor_4x4 sse2/;
+ specialize qw/vpx_highbd_d63_predictor_4x4 neon sse2/;
add_proto qw/void vpx_highbd_h_predictor_4x4/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_h_predictor_4x4 neon sse2/;
@@ -256,7 +256,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
specialize qw/vpx_highbd_d45_predictor_8x8 neon ssse3/;
add_proto qw/void vpx_highbd_d63_predictor_8x8/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d63_predictor_8x8 ssse3/;
+ specialize qw/vpx_highbd_d63_predictor_8x8 neon ssse3/;
add_proto qw/void vpx_highbd_h_predictor_8x8/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_h_predictor_8x8 neon sse2/;
@@ -295,7 +295,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
specialize qw/vpx_highbd_d45_predictor_16x16 neon ssse3/;
add_proto qw/void vpx_highbd_d63_predictor_16x16/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d63_predictor_16x16 ssse3/;
+ specialize qw/vpx_highbd_d63_predictor_16x16 neon ssse3/;
add_proto qw/void vpx_highbd_h_predictor_16x16/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_h_predictor_16x16 neon sse2/;
@@ -334,7 +334,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
specialize qw/vpx_highbd_d45_predictor_32x32 neon ssse3/;
add_proto qw/void vpx_highbd_d63_predictor_32x32/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
- specialize qw/vpx_highbd_d63_predictor_32x32 ssse3/;
+ specialize qw/vpx_highbd_d63_predictor_32x32 neon ssse3/;
add_proto qw/void vpx_highbd_h_predictor_32x32/, "uint16_t *dst, ptrdiff_t stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_h_predictor_32x32 neon sse2/;