author     James Zern <jzern@google.com>  2023-03-28 20:14:12 +0000
committer  Gerrit Code Review <noreply-gerritcodereview@google.com>  2023-03-28 20:14:12 +0000
commit     aba570ac9572ed5e85c38b9e8d9315d7d6777876 (patch)
tree       6975680612c9330a094cd6ad550428234b48bb4c
parent     8e58d504fad29ac16cec9d75c8c8c013ddc42496 (diff)
parent     100ca0356ddf67e92da35699d92bc180429d0bc1 (diff)
Merge changes If83ff1ad,I8fb00a15,Iaad58e77,Iac166d60 into main
* changes:
  Randomize second half of above_row_ in intrapred tests for Neon
  Allow non-uniform above array in d63 predictor Neon impl
  Allow non-uniform above array in d45 predictor Neon impl
  Allow non-uniform above array in highbd d45 predictor Neon impl
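For reference, all three predictor changes revolve around the same three-tap filter. Below is a minimal scalar sketch of what the new 4x4 d45 path computes, derived from the comments in this commit and assuming the AVG3 rounding used throughout vpx_dsp; it is illustrative only, not the library's C reference.

#include <stddef.h>
#include <stdint.h>

#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)

// Each output row is the filtered "above" row shifted left by one, and the
// bottom-right pixel is pinned to above[7] rather than a filtered value.
static void d45_4x4_sketch(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *above) {
  uint8_t d0[7];
  int r, c;
  for (c = 0; c < 6; ++c) d0[c] = AVG3(above[c], above[c + 1], above[c + 2]);
  d0[6] = above[7];  // corresponds to the dst[3 * stride + 3] = a7 fixup below
  for (r = 0; r < 4; ++r) {
    for (c = 0; c < 4; ++c) dst[r * stride + c] = d0[r + c];
  }
}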
-rw-r--r--  test/vp9_intrapred_test.cc           |  16
-rw-r--r--  vpx_dsp/arm/highbd_intrapred_neon.c  | 275
-rw-r--r--  vpx_dsp/arm/intrapred_neon.c         | 443
3 files changed, 428 insertions, 306 deletions
diff --git a/test/vp9_intrapred_test.cc b/test/vp9_intrapred_test.cc
index cec903161..6de7cf8d0 100644
--- a/test/vp9_intrapred_test.cc
+++ b/test/vp9_intrapred_test.cc
@@ -55,6 +55,21 @@ class IntraPredTest : public ::testing::TestWithParam<PredParam> {
ref_dst_ = ref_dst;
int error_count = 0;
for (int i = 0; i < count_test_block; ++i) {
+ // TODO(webm:1797): Some of the optimised predictor implementations rely
+ // on the trailing half of above_row_ being a copy of the final element.
+ // However, relying on this can cause the MD5 tests to fail in some cases.
+ // These cases have all been fixed for Neon, so fill the whole of
+ // above_row_ randomly.
+#if HAVE_NEON
+ // Fill edges with random data, try first with saturated values.
+ for (int x = -1; x < 2 * block_size; x++) {
+ if (i == 0) {
+ above_row_[x] = mask_;
+ } else {
+ above_row_[x] = rnd.Rand16() & mask_;
+ }
+ }
+#else
// Fill edges with random data, try first with saturated values.
for (int x = -1; x < block_size; x++) {
if (i == 0) {
@@ -66,6 +81,7 @@ class IntraPredTest : public ::testing::TestWithParam<PredParam> {
for (int x = block_size; x < 2 * block_size; x++) {
above_row_[x] = above_row_[block_size - 1];
}
+#endif
for (int y = 0; y < block_size; y++) {
if (i == 0) {
left_col_[y] = mask_;
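The hazard this randomization exposes: the old Neon paths substituted a duplicated above[block_size - 1] where the C reference filters the actual trailing samples, so the two only agree when the tail happens to be uniform. A tiny illustration with made-up values (AVG3 as in vpx_dsp):

#include <stdint.h>
#include <stdio.h>

#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)

int main(void) {
  // For an 8-wide block, lane 7 of the first d45 row filters above[6..8].
  const uint8_t above[9] = { 10, 20, 30, 40, 50, 60, 70, 80, 200 };
  const int reference = AVG3(above[6], above[7], above[8]);        // real tail
  const int assumed_uniform = AVG3(above[6], above[7], above[7]);  // old assumption
  // Equal only when above[8] == above[7]; a random tail makes the MD5s diverge.
  printf("%d vs %d\n", reference, assumed_uniform);
  return 0;
}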
diff --git a/vpx_dsp/arm/highbd_intrapred_neon.c b/vpx_dsp/arm/highbd_intrapred_neon.c
index 503900915..05c9c7f19 100644
--- a/vpx_dsp/arm/highbd_intrapred_neon.c
+++ b/vpx_dsp/arm/highbd_intrapred_neon.c
@@ -289,166 +289,179 @@ void vpx_highbd_dc_128_predictor_32x32_neon(uint16_t *dst, ptrdiff_t stride,
void vpx_highbd_d45_predictor_4x4_neon(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
- const uint16x8_t ABCDEFGH = vld1q_u16(above);
- const uint16x8_t BCDEFGH0 = vld1q_u16(above + 1);
- const uint16x8_t CDEFGH00 = vld1q_u16(above + 2);
- const uint16x8_t avg1 = vhaddq_u16(ABCDEFGH, CDEFGH00);
- const uint16x8_t avg2 = vrhaddq_u16(avg1, BCDEFGH0);
- const uint16x4_t avg2_low = vget_low_u16(avg2);
- const uint16x4_t avg2_high = vget_high_u16(avg2);
- const uint16x4_t r1 = vext_u16(avg2_low, avg2_high, 1);
- const uint16x4_t r2 = vext_u16(avg2_low, avg2_high, 2);
- const uint16x4_t r3 = vext_u16(avg2_low, avg2_high, 3);
+ uint16x8_t a0, a1, a2, d0;
+ uint16_t a7;
(void)left;
(void)bd;
- vst1_u16(dst, avg2_low);
- dst += stride;
- vst1_u16(dst, r1);
- dst += stride;
- vst1_u16(dst, r2);
- dst += stride;
- vst1_u16(dst, r3);
- vst1q_lane_u16(dst + 3, ABCDEFGH, 7);
-}
-static INLINE void d45_store_8(uint16_t **dst, const ptrdiff_t stride,
- const uint16x8_t above_right, uint16x8_t *row) {
- *row = vextq_u16(*row, above_right, 1);
- vst1q_u16(*dst, *row);
- *dst += stride;
+ a0 = vld1q_u16(above);
+ a7 = above[7];
+
+ // [ above[1], ..., above[6], x, x ]
+ a1 = vextq_u16(a0, a0, 1);
+ // [ above[2], ..., above[7], x, x ]
+ a2 = vextq_u16(a0, a0, 2);
+
+ // d0[0] = AVG3(above[0], above[1], above[2]);
+ // ...
+ // d0[5] = AVG3(above[5], above[6], above[7]);
+ // d0[6] = x (don't care)
+ // d0[7] = x (don't care)
+ d0 = vrhaddq_u16(vhaddq_u16(a0, a2), a1);
+
+ // We want:
+ // stride=0 [ d0[0], d0[1], d0[2], d0[3] ]
+ // stride=1 [ d0[1], d0[2], d0[3], d0[4] ]
+ // stride=2 [ d0[2], d0[3], d0[4], d0[5] ]
+ // stride=3 [ d0[3], d0[4], d0[5], above[7] ]
+ vst1_u16(dst + 0 * stride, vget_low_u16(d0));
+ vst1_u16(dst + 1 * stride, vget_low_u16(vextq_u16(d0, d0, 1)));
+ vst1_u16(dst + 2 * stride, vget_low_u16(vextq_u16(d0, d0, 2)));
+ vst1_u16(dst + 3 * stride, vget_low_u16(vextq_u16(d0, d0, 3)));
+
+ // We stored d0[6] above, so fix it up to above[7].
+ dst[3 * stride + 3] = a7;
}
void vpx_highbd_d45_predictor_8x8_neon(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
- const uint16x8_t A0 = vld1q_u16(above);
- const uint16x8_t above_right = vdupq_lane_u16(vget_high_u16(A0), 3);
- const uint16x8_t A1 = vld1q_u16(above + 1);
- const uint16x8_t A2 = vld1q_u16(above + 2);
- const uint16x8_t avg1 = vhaddq_u16(A0, A2);
- uint16x8_t row = vrhaddq_u16(avg1, A1);
+ uint16x8_t ax0, a0, a1, a7, d0;
(void)left;
(void)bd;
- vst1q_u16(dst, row);
- dst += stride;
- d45_store_8(&dst, stride, above_right, &row);
- d45_store_8(&dst, stride, above_right, &row);
- d45_store_8(&dst, stride, above_right, &row);
- d45_store_8(&dst, stride, above_right, &row);
- d45_store_8(&dst, stride, above_right, &row);
- d45_store_8(&dst, stride, above_right, &row);
- vst1q_u16(dst, above_right);
-}
-
-static INLINE void d45_store_16(uint16_t **dst, const ptrdiff_t stride,
- const uint16x8_t above_right, uint16x8_t *row_0,
- uint16x8_t *row_1) {
- *row_0 = vextq_u16(*row_0, *row_1, 1);
- *row_1 = vextq_u16(*row_1, above_right, 1);
- vst1q_u16(*dst, *row_0);
- *dst += 8;
- vst1q_u16(*dst, *row_1);
- *dst += stride - 8;
+ a0 = vld1q_u16(above + 0);
+ a1 = vld1q_u16(above + 1);
+ a7 = vld1q_dup_u16(above + 7);
+
+ // We want to calculate the AVG3 result in lanes 1-7 inclusive so we can
+ // shift in above[7] later, so shift a0 across by one to get the right
+ // inputs:
+ // [ x, above[0], ... , above[6] ]
+ ax0 = vextq_u16(a0, a0, 7);
+
+ // d0[0] = x (don't care)
+ // d0[1] = AVG3(above[0], above[1], above[2]);
+ // ...
+ // d0[7] = AVG3(above[6], above[7], above[8]);
+ d0 = vrhaddq_u16(vhaddq_u16(ax0, a1), a0);
+
+ // Undo the earlier ext, incrementally shift in duplicates of above[7].
+ vst1q_u16(dst + 0 * stride, vextq_u16(d0, a7, 1));
+ vst1q_u16(dst + 1 * stride, vextq_u16(d0, a7, 2));
+ vst1q_u16(dst + 2 * stride, vextq_u16(d0, a7, 3));
+ vst1q_u16(dst + 3 * stride, vextq_u16(d0, a7, 4));
+ vst1q_u16(dst + 4 * stride, vextq_u16(d0, a7, 5));
+ vst1q_u16(dst + 5 * stride, vextq_u16(d0, a7, 6));
+ vst1q_u16(dst + 6 * stride, vextq_u16(d0, a7, 7));
+ vst1q_u16(dst + 7 * stride, a7);
}
void vpx_highbd_d45_predictor_16x16_neon(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
- const uint16x8_t A0_0 = vld1q_u16(above);
- const uint16x8_t A0_1 = vld1q_u16(above + 8);
- const uint16x8_t above_right = vdupq_lane_u16(vget_high_u16(A0_1), 3);
- const uint16x8_t A1_0 = vld1q_u16(above + 1);
- const uint16x8_t A1_1 = vld1q_u16(above + 9);
- const uint16x8_t A2_0 = vld1q_u16(above + 2);
- const uint16x8_t A2_1 = vld1q_u16(above + 10);
- const uint16x8_t avg_0 = vhaddq_u16(A0_0, A2_0);
- const uint16x8_t avg_1 = vhaddq_u16(A0_1, A2_1);
- uint16x8_t row_0 = vrhaddq_u16(avg_0, A1_0);
- uint16x8_t row_1 = vrhaddq_u16(avg_1, A1_1);
+ uint16x8_t ax0, a0, a1, a7, a8, a9, a15, d0[2];
(void)left;
(void)bd;
- vst1q_u16(dst, row_0);
- vst1q_u16(dst + 8, row_1);
- dst += stride;
- d45_store_16(&dst, stride, above_right, &row_0, &row_1);
- d45_store_16(&dst, stride, above_right, &row_0, &row_1);
- d45_store_16(&dst, stride, above_right, &row_0, &row_1);
- d45_store_16(&dst, stride, above_right, &row_0, &row_1);
- d45_store_16(&dst, stride, above_right, &row_0, &row_1);
- d45_store_16(&dst, stride, above_right, &row_0, &row_1);
- d45_store_16(&dst, stride, above_right, &row_0, &row_1);
- d45_store_16(&dst, stride, above_right, &row_0, &row_1);
- d45_store_16(&dst, stride, above_right, &row_0, &row_1);
- d45_store_16(&dst, stride, above_right, &row_0, &row_1);
- d45_store_16(&dst, stride, above_right, &row_0, &row_1);
- d45_store_16(&dst, stride, above_right, &row_0, &row_1);
- d45_store_16(&dst, stride, above_right, &row_0, &row_1);
- d45_store_16(&dst, stride, above_right, &row_0, &row_1);
- vst1q_u16(dst, above_right);
- vst1q_u16(dst + 8, above_right);
+ a0 = vld1q_u16(above + 0);
+ a1 = vld1q_u16(above + 1);
+ a7 = vld1q_u16(above + 7);
+ a8 = vld1q_u16(above + 8);
+ a9 = vld1q_u16(above + 9);
+ a15 = vld1q_dup_u16(above + 15);
+
+ // [ x, above[0], ... , above[6] ]
+ ax0 = vextq_u16(a0, a0, 7);
+
+ // We have one unused lane here to leave room to shift in above[15] in the
+ // last lane:
+ // d0[0][0] = x (don't care)
+ // d0[0][1] = AVG3(above[0], above[1], above[2]);
+ // ...
+ // d0[0][7] = AVG3(above[6], above[7], above[8]);
+ // d0[1][0] = AVG3(above[7], above[8], above[9]);
+ // ...
+ // d0[1][7] = AVG3(above[14], above[15], above[16]);
+ d0[0] = vrhaddq_u16(vhaddq_u16(ax0, a1), a0);
+ d0[1] = vrhaddq_u16(vhaddq_u16(a7, a9), a8);
+
+ // Incrementally shift in duplicates of above[15].
+ vst1q_u16(dst + 0 * stride + 0, vextq_u16(d0[0], d0[1], 1));
+ vst1q_u16(dst + 0 * stride + 8, vextq_u16(d0[1], a15, 1));
+ vst1q_u16(dst + 1 * stride + 0, vextq_u16(d0[0], d0[1], 2));
+ vst1q_u16(dst + 1 * stride + 8, vextq_u16(d0[1], a15, 2));
+ vst1q_u16(dst + 2 * stride + 0, vextq_u16(d0[0], d0[1], 3));
+ vst1q_u16(dst + 2 * stride + 8, vextq_u16(d0[1], a15, 3));
+ vst1q_u16(dst + 3 * stride + 0, vextq_u16(d0[0], d0[1], 4));
+ vst1q_u16(dst + 3 * stride + 8, vextq_u16(d0[1], a15, 4));
+ vst1q_u16(dst + 4 * stride + 0, vextq_u16(d0[0], d0[1], 5));
+ vst1q_u16(dst + 4 * stride + 8, vextq_u16(d0[1], a15, 5));
+ vst1q_u16(dst + 5 * stride + 0, vextq_u16(d0[0], d0[1], 6));
+ vst1q_u16(dst + 5 * stride + 8, vextq_u16(d0[1], a15, 6));
+ vst1q_u16(dst + 6 * stride + 0, vextq_u16(d0[0], d0[1], 7));
+ vst1q_u16(dst + 6 * stride + 8, vextq_u16(d0[1], a15, 7));
+ vst1q_u16(dst + 7 * stride + 0, d0[1]);
+ vst1q_u16(dst + 7 * stride + 8, a15);
+
+ vst1q_u16(dst + 8 * stride + 0, vextq_u16(d0[1], a15, 1));
+ vst1q_u16(dst + 8 * stride + 8, a15);
+ vst1q_u16(dst + 9 * stride + 0, vextq_u16(d0[1], a15, 2));
+ vst1q_u16(dst + 9 * stride + 8, a15);
+ vst1q_u16(dst + 10 * stride + 0, vextq_u16(d0[1], a15, 3));
+ vst1q_u16(dst + 10 * stride + 8, a15);
+ vst1q_u16(dst + 11 * stride + 0, vextq_u16(d0[1], a15, 4));
+ vst1q_u16(dst + 11 * stride + 8, a15);
+ vst1q_u16(dst + 12 * stride + 0, vextq_u16(d0[1], a15, 5));
+ vst1q_u16(dst + 12 * stride + 8, a15);
+ vst1q_u16(dst + 13 * stride + 0, vextq_u16(d0[1], a15, 6));
+ vst1q_u16(dst + 13 * stride + 8, a15);
+ vst1q_u16(dst + 14 * stride + 0, vextq_u16(d0[1], a15, 7));
+ vst1q_u16(dst + 14 * stride + 8, a15);
+ vst1q_u16(dst + 15 * stride + 0, a15);
+ vst1q_u16(dst + 15 * stride + 8, a15);
}
void vpx_highbd_d45_predictor_32x32_neon(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
- const uint16x8_t A0_0 = vld1q_u16(above);
- const uint16x8_t A0_1 = vld1q_u16(above + 8);
- const uint16x8_t A0_2 = vld1q_u16(above + 16);
- const uint16x8_t A0_3 = vld1q_u16(above + 24);
- const uint16x8_t above_right = vdupq_lane_u16(vget_high_u16(A0_3), 3);
- const uint16x8_t A1_0 = vld1q_u16(above + 1);
- const uint16x8_t A1_1 = vld1q_u16(above + 9);
- const uint16x8_t A1_2 = vld1q_u16(above + 17);
- const uint16x8_t A1_3 = vld1q_u16(above + 25);
- const uint16x8_t A2_0 = vld1q_u16(above + 2);
- const uint16x8_t A2_1 = vld1q_u16(above + 10);
- const uint16x8_t A2_2 = vld1q_u16(above + 18);
- const uint16x8_t A2_3 = vld1q_u16(above + 26);
- const uint16x8_t avg_0 = vhaddq_u16(A0_0, A2_0);
- const uint16x8_t avg_1 = vhaddq_u16(A0_1, A2_1);
- const uint16x8_t avg_2 = vhaddq_u16(A0_2, A2_2);
- const uint16x8_t avg_3 = vhaddq_u16(A0_3, A2_3);
- uint16x8_t row_0 = vrhaddq_u16(avg_0, A1_0);
- uint16x8_t row_1 = vrhaddq_u16(avg_1, A1_1);
- uint16x8_t row_2 = vrhaddq_u16(avg_2, A1_2);
- uint16x8_t row_3 = vrhaddq_u16(avg_3, A1_3);
+ uint16x8_t ax0, a0, a1, a7, a8, a9, a15, a16, a17, a23, a24, a25, a31, d0[4];
int i;
(void)left;
(void)bd;
- vst1q_u16(dst, row_0);
- dst += 8;
- vst1q_u16(dst, row_1);
- dst += 8;
- vst1q_u16(dst, row_2);
- dst += 8;
- vst1q_u16(dst, row_3);
- dst += stride - 24;
-
- for (i = 0; i < 30; ++i) {
- row_0 = vextq_u16(row_0, row_1, 1);
- row_1 = vextq_u16(row_1, row_2, 1);
- row_2 = vextq_u16(row_2, row_3, 1);
- row_3 = vextq_u16(row_3, above_right, 1);
- vst1q_u16(dst, row_0);
- dst += 8;
- vst1q_u16(dst, row_1);
- dst += 8;
- vst1q_u16(dst, row_2);
- dst += 8;
- vst1q_u16(dst, row_3);
- dst += stride - 24;
- }
+ a0 = vld1q_u16(above + 0);
+ a1 = vld1q_u16(above + 1);
+ a7 = vld1q_u16(above + 7);
+ a8 = vld1q_u16(above + 8);
+ a9 = vld1q_u16(above + 9);
+ a15 = vld1q_u16(above + 15);
+ a16 = vld1q_u16(above + 16);
+ a17 = vld1q_u16(above + 17);
+ a23 = vld1q_u16(above + 23);
+ a24 = vld1q_u16(above + 24);
+ a25 = vld1q_u16(above + 25);
+ a31 = vld1q_dup_u16(above + 31);
- vst1q_u16(dst, above_right);
- dst += 8;
- vst1q_u16(dst, above_right);
- dst += 8;
- vst1q_u16(dst, above_right);
- dst += 8;
- vst1q_u16(dst, above_right);
+ // [ x, above[0], ... , above[6] ]
+ ax0 = vextq_u16(a0, a0, 7);
+
+ d0[0] = vrhaddq_u16(vhaddq_u16(ax0, a1), a0);
+ d0[1] = vrhaddq_u16(vhaddq_u16(a7, a9), a8);
+ d0[2] = vrhaddq_u16(vhaddq_u16(a15, a17), a16);
+ d0[3] = vrhaddq_u16(vhaddq_u16(a23, a25), a24);
+
+ for (i = 0; i < 32; ++i) {
+ d0[0] = vextq_u16(d0[0], d0[1], 1);
+ d0[1] = vextq_u16(d0[1], d0[2], 1);
+ d0[2] = vextq_u16(d0[2], d0[3], 1);
+ d0[3] = vextq_u16(d0[3], a31, 1);
+ vst1q_u16(dst + 0, d0[0]);
+ vst1q_u16(dst + 8, d0[1]);
+ vst1q_u16(dst + 16, d0[2]);
+ vst1q_u16(dst + 24, d0[3]);
+ dst += stride;
+ }
}
// -----------------------------------------------------------------------------
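The rewritten d45 paths in both files share one pattern: compute the AVG3-filtered row once, using the halving-add identity AVG3(a, b, c) == vrhadd(vhadd(a, c), b), then build every output row with a single vext that shifts in the above-right duplicate. A standalone sketch of the first two highbd 8x8 rows under those assumptions (illustrative, names are mine, not a drop-in replacement):

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

// vextq_u16(d0, a7, k) yields [ d0[k], ..., d0[7], a7[0], ..., a7[k-1] ],
// so each row is the previous one shifted left by one lane with above[7]
// shifted in. vext lane counts must be compile-time constants, which is
// why the real implementations unroll every row rather than looping.
static void d45_8x8_first_two_rows(uint16_t *dst, ptrdiff_t stride,
                                   const uint16_t *above) {
  const uint16x8_t a0 = vld1q_u16(above + 0);      // above[0..7]
  const uint16x8_t a1 = vld1q_u16(above + 1);      // above[1..8]
  const uint16x8_t ax0 = vextq_u16(a0, a0, 7);     // [ x, above[0..6] ]
  const uint16x8_t a7 = vld1q_dup_u16(above + 7);  // above[7] in every lane
  // d0[k] = AVG3(above[k - 1], above[k], above[k + 1]) for k = 1..7.
  const uint16x8_t d0 = vrhaddq_u16(vhaddq_u16(ax0, a1), a0);
  vst1q_u16(dst + 0 * stride, vextq_u16(d0, a7, 1));
  vst1q_u16(dst + 1 * stride, vextq_u16(d0, a7, 2));
}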
diff --git a/vpx_dsp/arm/intrapred_neon.c b/vpx_dsp/arm/intrapred_neon.c
index 892310f15..3d117fa93 100644
--- a/vpx_dsp/arm/intrapred_neon.c
+++ b/vpx_dsp/arm/intrapred_neon.c
@@ -263,123 +263,202 @@ void vpx_dc_128_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
void vpx_d45_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
- const uint8x8_t ABCDEFGH = vld1_u8(above);
- const uint64x1_t A1 = vshr_n_u64(vreinterpret_u64_u8(ABCDEFGH), 8);
- const uint64x1_t A2 = vshr_n_u64(vreinterpret_u64_u8(ABCDEFGH), 16);
- const uint8x8_t BCDEFGH0 = vreinterpret_u8_u64(A1);
- const uint8x8_t CDEFGH00 = vreinterpret_u8_u64(A2);
- const uint8x8_t avg1 = vhadd_u8(ABCDEFGH, CDEFGH00);
- const uint8x8_t avg2 = vrhadd_u8(avg1, BCDEFGH0);
- const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);
- const uint32x2_t r0 = vreinterpret_u32_u8(avg2);
- const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8));
- const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));
- const uint32x2_t r3 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));
+ uint8x8_t a0, a1, a2, d0;
+ uint8_t a7;
(void)left;
- vst1_lane_u32((uint32_t *)(dst + 0 * stride), r0, 0);
- vst1_lane_u32((uint32_t *)(dst + 1 * stride), r1, 0);
- vst1_lane_u32((uint32_t *)(dst + 2 * stride), r2, 0);
- vst1_lane_u32((uint32_t *)(dst + 3 * stride), r3, 0);
- vst1_lane_u8(dst + 3 * stride + 3, ABCDEFGH, 7);
-}
-static INLINE void d45_store_8(uint8_t **dst, const ptrdiff_t stride,
- const uint8x8_t above_right, uint8x8_t *row) {
- *row = vext_u8(*row, above_right, 1);
- vst1_u8(*dst, *row);
- *dst += stride;
+ a0 = vld1_u8(above);
+ a7 = above[7];
+
+ // [ above[1], ..., above[6], x, x ]
+ a1 = vext_u8(a0, a0, 1);
+ // [ above[2], ..., above[7], x, x ]
+ a2 = vext_u8(a0, a0, 2);
+
+ // d0[0] = AVG3(above[0], above[1], above[2]);
+ // ...
+ // d0[5] = AVG3(above[5], above[6], above[7]);
+ // d0[6] = x (don't care)
+ // d0[7] = x (don't care)
+ d0 = vrhadd_u8(vhadd_u8(a0, a2), a1);
+
+ // We want:
+ // stride=0 [ d0[0], d0[1], d0[2], d0[3] ]
+ // stride=1 [ d0[1], d0[2], d0[3], d0[4] ]
+ // stride=2 [ d0[2], d0[3], d0[4], d0[5] ]
+ // stride=3 [ d0[3], d0[4], d0[5], above[7] ]
+ store_u8_4x1(dst + 0 * stride, d0);
+ store_u8_4x1(dst + 1 * stride, vext_u8(d0, d0, 1));
+ store_u8_4x1(dst + 2 * stride, vext_u8(d0, d0, 2));
+ store_u8_4x1(dst + 3 * stride, vext_u8(d0, d0, 3));
+
+ // We stored d0[6] above, so fix it up to above[7].
+ dst[3 * stride + 3] = a7;
}
void vpx_d45_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
- const uint8x8_t A0 = vld1_u8(above);
- const uint8x8_t above_right = vdup_lane_u8(A0, 7);
- const uint8x8_t A1 = vext_u8(A0, above_right, 1);
- const uint8x8_t A2 = vext_u8(A0, above_right, 2);
- const uint8x8_t avg1 = vhadd_u8(A0, A2);
- uint8x8_t row = vrhadd_u8(avg1, A1);
+ uint8x8_t ax0, a0, a1, a7, d0;
(void)left;
- vst1_u8(dst, row);
- dst += stride;
- d45_store_8(&dst, stride, above_right, &row);
- d45_store_8(&dst, stride, above_right, &row);
- d45_store_8(&dst, stride, above_right, &row);
- d45_store_8(&dst, stride, above_right, &row);
- d45_store_8(&dst, stride, above_right, &row);
- d45_store_8(&dst, stride, above_right, &row);
- vst1_u8(dst, above_right);
-}
-
-static INLINE void d45_store_16(uint8_t **dst, const ptrdiff_t stride,
- const uint8x16_t above_right, uint8x16_t *row) {
- *row = vextq_u8(*row, above_right, 1);
- vst1q_u8(*dst, *row);
- *dst += stride;
+ a0 = vld1_u8(above + 0);
+ a1 = vld1_u8(above + 1);
+ a7 = vld1_dup_u8(above + 7);
+
+ // We want to calculate the AVG3 result in lanes 1-7 inclusive so we can
+ // shift in above[7] later, so shift a0 across by one to get the right
+ // inputs:
+ // [ x, above[0], ... , above[6] ]
+ ax0 = vext_u8(a0, a0, 7);
+
+ // d0[0] = x (don't care)
+ // d0[1] = AVG3(above[0], above[1], above[2]);
+ // ...
+ // d0[7] = AVG3(above[6], above[7], above[8]);
+ d0 = vrhadd_u8(vhadd_u8(ax0, a1), a0);
+
+ // Undo the earlier ext, incrementally shift in duplicates of above[7].
+ vst1_u8(dst + 0 * stride, vext_u8(d0, a7, 1));
+ vst1_u8(dst + 1 * stride, vext_u8(d0, a7, 2));
+ vst1_u8(dst + 2 * stride, vext_u8(d0, a7, 3));
+ vst1_u8(dst + 3 * stride, vext_u8(d0, a7, 4));
+ vst1_u8(dst + 4 * stride, vext_u8(d0, a7, 5));
+ vst1_u8(dst + 5 * stride, vext_u8(d0, a7, 6));
+ vst1_u8(dst + 6 * stride, vext_u8(d0, a7, 7));
+ vst1_u8(dst + 7 * stride, a7);
}
void vpx_d45_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
- const uint8x16_t A0 = vld1q_u8(above);
- const uint8x16_t above_right = vdupq_lane_u8(vget_high_u8(A0), 7);
- const uint8x16_t A1 = vextq_u8(A0, above_right, 1);
- const uint8x16_t A2 = vextq_u8(A0, above_right, 2);
- const uint8x16_t avg1 = vhaddq_u8(A0, A2);
- uint8x16_t row = vrhaddq_u8(avg1, A1);
+ uint8x16_t ax0, a0, a1, a15, d0;
(void)left;
- vst1q_u8(dst, row);
- dst += stride;
- d45_store_16(&dst, stride, above_right, &row);
- d45_store_16(&dst, stride, above_right, &row);
- d45_store_16(&dst, stride, above_right, &row);
- d45_store_16(&dst, stride, above_right, &row);
- d45_store_16(&dst, stride, above_right, &row);
- d45_store_16(&dst, stride, above_right, &row);
- d45_store_16(&dst, stride, above_right, &row);
- d45_store_16(&dst, stride, above_right, &row);
- d45_store_16(&dst, stride, above_right, &row);
- d45_store_16(&dst, stride, above_right, &row);
- d45_store_16(&dst, stride, above_right, &row);
- d45_store_16(&dst, stride, above_right, &row);
- d45_store_16(&dst, stride, above_right, &row);
- d45_store_16(&dst, stride, above_right, &row);
- vst1q_u8(dst, above_right);
+ a0 = vld1q_u8(above + 0);
+ a1 = vld1q_u8(above + 1);
+ a15 = vld1q_dup_u8(above + 15);
+
+ // We want to calculate the AVG3 result in lanes 1-15 inclusive so we can
+ // shift in above[15] later, so shift a0 across by one to get the right
+ // inputs:
+ // [ x, above[0], ... , above[14] ]
+ ax0 = vextq_u8(a0, a0, 15);
+
+ // d0[0] = x (don't care)
+ // d0[1] = AVG3(above[0], above[1], above[2]);
+ // ...
+ // d0[15] = AVG3(above[14], above[15], above[16]);
+ d0 = vrhaddq_u8(vhaddq_u8(ax0, a1), a0);
+
+ // Undo the earlier ext, incrementally shift in duplicates of above[15].
+ vst1q_u8(dst + 0 * stride, vextq_u8(d0, a15, 1));
+ vst1q_u8(dst + 1 * stride, vextq_u8(d0, a15, 2));
+ vst1q_u8(dst + 2 * stride, vextq_u8(d0, a15, 3));
+ vst1q_u8(dst + 3 * stride, vextq_u8(d0, a15, 4));
+ vst1q_u8(dst + 4 * stride, vextq_u8(d0, a15, 5));
+ vst1q_u8(dst + 5 * stride, vextq_u8(d0, a15, 6));
+ vst1q_u8(dst + 6 * stride, vextq_u8(d0, a15, 7));
+ vst1q_u8(dst + 7 * stride, vextq_u8(d0, a15, 8));
+ vst1q_u8(dst + 8 * stride, vextq_u8(d0, a15, 9));
+ vst1q_u8(dst + 9 * stride, vextq_u8(d0, a15, 10));
+ vst1q_u8(dst + 10 * stride, vextq_u8(d0, a15, 11));
+ vst1q_u8(dst + 11 * stride, vextq_u8(d0, a15, 12));
+ vst1q_u8(dst + 12 * stride, vextq_u8(d0, a15, 13));
+ vst1q_u8(dst + 13 * stride, vextq_u8(d0, a15, 14));
+ vst1q_u8(dst + 14 * stride, vextq_u8(d0, a15, 15));
+ vst1q_u8(dst + 15 * stride, a15);
}
void vpx_d45_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
- const uint8x16_t A0_0 = vld1q_u8(above);
- const uint8x16_t A0_1 = vld1q_u8(above + 16);
- const uint8x16_t above_right = vdupq_lane_u8(vget_high_u8(A0_1), 7);
- const uint8x16_t A1_0 = vld1q_u8(above + 1);
- const uint8x16_t A1_1 = vld1q_u8(above + 17);
- const uint8x16_t A2_0 = vld1q_u8(above + 2);
- const uint8x16_t A2_1 = vld1q_u8(above + 18);
- const uint8x16_t avg_0 = vhaddq_u8(A0_0, A2_0);
- const uint8x16_t avg_1 = vhaddq_u8(A0_1, A2_1);
- uint8x16_t row_0 = vrhaddq_u8(avg_0, A1_0);
- uint8x16_t row_1 = vrhaddq_u8(avg_1, A1_1);
- int i;
+ uint8x16_t ax0, a0, a1, a15, a16, a17, a31, d0[2];
(void)left;
- vst1q_u8(dst, row_0);
- dst += 16;
- vst1q_u8(dst, row_1);
- dst += stride - 16;
+ a0 = vld1q_u8(above + 0);
+ a1 = vld1q_u8(above + 1);
+ a15 = vld1q_u8(above + 15);
+ a16 = vld1q_u8(above + 16);
+ a17 = vld1q_u8(above + 17);
+ a31 = vld1q_dup_u8(above + 31);
- for (i = 0; i < 30; ++i) {
- row_0 = vextq_u8(row_0, row_1, 1);
- row_1 = vextq_u8(row_1, above_right, 1);
- vst1q_u8(dst, row_0);
- dst += 16;
- vst1q_u8(dst, row_1);
- dst += stride - 16;
- }
+ // We want to calculate the AVG3 result in lanes 1-15 inclusive so we can
+ // shift in the next vector (and ultimately above[31]) later, so shift a0
+ // across by one to get the right inputs:
+ // [ x, above[0], ... , above[14] ]
+ ax0 = vextq_u8(a0, a0, 15);
- vst1q_u8(dst, above_right);
- dst += 16;
- vst1q_u8(dst, row_1);
+ // d0[0][0] = x (don't care)
+ // d0[0][1] = AVG3(above[0], above[1], above[2]);
+ // ...
+ // d0[0][15] = AVG3(above[14], above[15], above[16]);
+ // d0[1][0] = AVG3(above[15], above[16], above[17]);
+ // ...
+ // d0[1][15] = AVG3(above[30], above[31], above[32]);
+ d0[0] = vrhaddq_u8(vhaddq_u8(ax0, a1), a0);
+ d0[1] = vrhaddq_u8(vhaddq_u8(a15, a17), a16);
+
+ // Undo the earlier ext, incrementally shift in duplicates of above[31].
+ vst1q_u8(dst + 0 * stride + 0, vextq_u8(d0[0], d0[1], 1));
+ vst1q_u8(dst + 0 * stride + 16, vextq_u8(d0[1], a31, 1));
+ vst1q_u8(dst + 1 * stride + 0, vextq_u8(d0[0], d0[1], 2));
+ vst1q_u8(dst + 1 * stride + 16, vextq_u8(d0[1], a31, 2));
+ vst1q_u8(dst + 2 * stride + 0, vextq_u8(d0[0], d0[1], 3));
+ vst1q_u8(dst + 2 * stride + 16, vextq_u8(d0[1], a31, 3));
+ vst1q_u8(dst + 3 * stride + 0, vextq_u8(d0[0], d0[1], 4));
+ vst1q_u8(dst + 3 * stride + 16, vextq_u8(d0[1], a31, 4));
+ vst1q_u8(dst + 4 * stride + 0, vextq_u8(d0[0], d0[1], 5));
+ vst1q_u8(dst + 4 * stride + 16, vextq_u8(d0[1], a31, 5));
+ vst1q_u8(dst + 5 * stride + 0, vextq_u8(d0[0], d0[1], 6));
+ vst1q_u8(dst + 5 * stride + 16, vextq_u8(d0[1], a31, 6));
+ vst1q_u8(dst + 6 * stride + 0, vextq_u8(d0[0], d0[1], 7));
+ vst1q_u8(dst + 6 * stride + 16, vextq_u8(d0[1], a31, 7));
+ vst1q_u8(dst + 7 * stride + 0, vextq_u8(d0[0], d0[1], 8));
+ vst1q_u8(dst + 7 * stride + 16, vextq_u8(d0[1], a31, 8));
+ vst1q_u8(dst + 8 * stride + 0, vextq_u8(d0[0], d0[1], 9));
+ vst1q_u8(dst + 8 * stride + 16, vextq_u8(d0[1], a31, 9));
+ vst1q_u8(dst + 9 * stride + 0, vextq_u8(d0[0], d0[1], 10));
+ vst1q_u8(dst + 9 * stride + 16, vextq_u8(d0[1], a31, 10));
+ vst1q_u8(dst + 10 * stride + 0, vextq_u8(d0[0], d0[1], 11));
+ vst1q_u8(dst + 10 * stride + 16, vextq_u8(d0[1], a31, 11));
+ vst1q_u8(dst + 11 * stride + 0, vextq_u8(d0[0], d0[1], 12));
+ vst1q_u8(dst + 11 * stride + 16, vextq_u8(d0[1], a31, 12));
+ vst1q_u8(dst + 12 * stride + 0, vextq_u8(d0[0], d0[1], 13));
+ vst1q_u8(dst + 12 * stride + 16, vextq_u8(d0[1], a31, 13));
+ vst1q_u8(dst + 13 * stride + 0, vextq_u8(d0[0], d0[1], 14));
+ vst1q_u8(dst + 13 * stride + 16, vextq_u8(d0[1], a31, 14));
+ vst1q_u8(dst + 14 * stride + 0, vextq_u8(d0[0], d0[1], 15));
+ vst1q_u8(dst + 14 * stride + 16, vextq_u8(d0[1], a31, 15));
+ vst1q_u8(dst + 15 * stride + 0, d0[1]);
+ vst1q_u8(dst + 15 * stride + 16, a31);
+
+ vst1q_u8(dst + 16 * stride + 0, vextq_u8(d0[1], a31, 1));
+ vst1q_u8(dst + 16 * stride + 16, a31);
+ vst1q_u8(dst + 17 * stride + 0, vextq_u8(d0[1], a31, 2));
+ vst1q_u8(dst + 17 * stride + 16, a31);
+ vst1q_u8(dst + 18 * stride + 0, vextq_u8(d0[1], a31, 3));
+ vst1q_u8(dst + 18 * stride + 16, a31);
+ vst1q_u8(dst + 19 * stride + 0, vextq_u8(d0[1], a31, 4));
+ vst1q_u8(dst + 19 * stride + 16, a31);
+ vst1q_u8(dst + 20 * stride + 0, vextq_u8(d0[1], a31, 5));
+ vst1q_u8(dst + 20 * stride + 16, a31);
+ vst1q_u8(dst + 21 * stride + 0, vextq_u8(d0[1], a31, 6));
+ vst1q_u8(dst + 21 * stride + 16, a31);
+ vst1q_u8(dst + 22 * stride + 0, vextq_u8(d0[1], a31, 7));
+ vst1q_u8(dst + 22 * stride + 16, a31);
+ vst1q_u8(dst + 23 * stride + 0, vextq_u8(d0[1], a31, 8));
+ vst1q_u8(dst + 23 * stride + 16, a31);
+ vst1q_u8(dst + 24 * stride + 0, vextq_u8(d0[1], a31, 9));
+ vst1q_u8(dst + 24 * stride + 16, a31);
+ vst1q_u8(dst + 25 * stride + 0, vextq_u8(d0[1], a31, 10));
+ vst1q_u8(dst + 25 * stride + 16, a31);
+ vst1q_u8(dst + 26 * stride + 0, vextq_u8(d0[1], a31, 11));
+ vst1q_u8(dst + 26 * stride + 16, a31);
+ vst1q_u8(dst + 27 * stride + 0, vextq_u8(d0[1], a31, 12));
+ vst1q_u8(dst + 27 * stride + 16, a31);
+ vst1q_u8(dst + 28 * stride + 0, vextq_u8(d0[1], a31, 13));
+ vst1q_u8(dst + 28 * stride + 16, a31);
+ vst1q_u8(dst + 29 * stride + 0, vextq_u8(d0[1], a31, 14));
+ vst1q_u8(dst + 29 * stride + 16, a31);
+ vst1q_u8(dst + 30 * stride + 0, vextq_u8(d0[1], a31, 15));
+ vst1q_u8(dst + 30 * stride + 16, a31);
+ vst1q_u8(dst + 31 * stride + 0, a31);
+ vst1q_u8(dst + 31 * stride + 16, a31);
}
// -----------------------------------------------------------------------------
@@ -420,12 +499,16 @@ void vpx_d63_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
vst1_u8(dst + 0 * stride, d0);
vst1_u8(dst + 1 * stride, d1);
- vst1_u8(dst + 2 * stride, vext_u8(d0, a7, 1));
- vst1_u8(dst + 3 * stride, vext_u8(d1, a7, 1));
- vst1_u8(dst + 4 * stride, vext_u8(d0, a7, 2));
- vst1_u8(dst + 5 * stride, vext_u8(d1, a7, 2));
- vst1_u8(dst + 6 * stride, vext_u8(d0, a7, 3));
- vst1_u8(dst + 7 * stride, vext_u8(d1, a7, 3));
+
+ d0 = vext_u8(d0, d0, 7);
+ d1 = vext_u8(d1, d1, 7);
+
+ vst1_u8(dst + 2 * stride, vext_u8(d0, a7, 2));
+ vst1_u8(dst + 3 * stride, vext_u8(d1, a7, 2));
+ vst1_u8(dst + 4 * stride, vext_u8(d0, a7, 3));
+ vst1_u8(dst + 5 * stride, vext_u8(d1, a7, 3));
+ vst1_u8(dst + 6 * stride, vext_u8(d0, a7, 4));
+ vst1_u8(dst + 7 * stride, vext_u8(d1, a7, 4));
}
void vpx_d63_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
@@ -443,20 +526,24 @@ void vpx_d63_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
vst1q_u8(dst + 0 * stride, d0);
vst1q_u8(dst + 1 * stride, d1);
- vst1q_u8(dst + 2 * stride, vextq_u8(d0, a15, 1));
- vst1q_u8(dst + 3 * stride, vextq_u8(d1, a15, 1));
- vst1q_u8(dst + 4 * stride, vextq_u8(d0, a15, 2));
- vst1q_u8(dst + 5 * stride, vextq_u8(d1, a15, 2));
- vst1q_u8(dst + 6 * stride, vextq_u8(d0, a15, 3));
- vst1q_u8(dst + 7 * stride, vextq_u8(d1, a15, 3));
- vst1q_u8(dst + 8 * stride, vextq_u8(d0, a15, 4));
- vst1q_u8(dst + 9 * stride, vextq_u8(d1, a15, 4));
- vst1q_u8(dst + 10 * stride, vextq_u8(d0, a15, 5));
- vst1q_u8(dst + 11 * stride, vextq_u8(d1, a15, 5));
- vst1q_u8(dst + 12 * stride, vextq_u8(d0, a15, 6));
- vst1q_u8(dst + 13 * stride, vextq_u8(d1, a15, 6));
- vst1q_u8(dst + 14 * stride, vextq_u8(d0, a15, 7));
- vst1q_u8(dst + 15 * stride, vextq_u8(d1, a15, 7));
+
+ d0 = vextq_u8(d0, d0, 15);
+ d1 = vextq_u8(d1, d1, 15);
+
+ vst1q_u8(dst + 2 * stride, vextq_u8(d0, a15, 2));
+ vst1q_u8(dst + 3 * stride, vextq_u8(d1, a15, 2));
+ vst1q_u8(dst + 4 * stride, vextq_u8(d0, a15, 3));
+ vst1q_u8(dst + 5 * stride, vextq_u8(d1, a15, 3));
+ vst1q_u8(dst + 6 * stride, vextq_u8(d0, a15, 4));
+ vst1q_u8(dst + 7 * stride, vextq_u8(d1, a15, 4));
+ vst1q_u8(dst + 8 * stride, vextq_u8(d0, a15, 5));
+ vst1q_u8(dst + 9 * stride, vextq_u8(d1, a15, 5));
+ vst1q_u8(dst + 10 * stride, vextq_u8(d0, a15, 6));
+ vst1q_u8(dst + 11 * stride, vextq_u8(d1, a15, 6));
+ vst1q_u8(dst + 12 * stride, vextq_u8(d0, a15, 7));
+ vst1q_u8(dst + 13 * stride, vextq_u8(d1, a15, 7));
+ vst1q_u8(dst + 14 * stride, vextq_u8(d0, a15, 8));
+ vst1q_u8(dst + 15 * stride, vextq_u8(d1, a15, 8));
}
void vpx_d63_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
@@ -481,66 +568,72 @@ void vpx_d63_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
vst1q_u8(dst + 0 * stride + 16, d0_hi);
vst1q_u8(dst + 1 * stride + 0, d1_lo);
vst1q_u8(dst + 1 * stride + 16, d1_hi);
- vst1q_u8(dst + 2 * stride + 0, vextq_u8(d0_lo, d0_hi, 1));
- vst1q_u8(dst + 2 * stride + 16, vextq_u8(d0_hi, a31, 1));
- vst1q_u8(dst + 3 * stride + 0, vextq_u8(d1_lo, d1_hi, 1));
- vst1q_u8(dst + 3 * stride + 16, vextq_u8(d1_hi, a31, 1));
- vst1q_u8(dst + 4 * stride + 0, vextq_u8(d0_lo, d0_hi, 2));
- vst1q_u8(dst + 4 * stride + 16, vextq_u8(d0_hi, a31, 2));
- vst1q_u8(dst + 5 * stride + 0, vextq_u8(d1_lo, d1_hi, 2));
- vst1q_u8(dst + 5 * stride + 16, vextq_u8(d1_hi, a31, 2));
- vst1q_u8(dst + 6 * stride + 0, vextq_u8(d0_lo, d0_hi, 3));
- vst1q_u8(dst + 6 * stride + 16, vextq_u8(d0_hi, a31, 3));
- vst1q_u8(dst + 7 * stride + 0, vextq_u8(d1_lo, d1_hi, 3));
- vst1q_u8(dst + 7 * stride + 16, vextq_u8(d1_hi, a31, 3));
- vst1q_u8(dst + 8 * stride + 0, vextq_u8(d0_lo, d0_hi, 4));
- vst1q_u8(dst + 8 * stride + 16, vextq_u8(d0_hi, a31, 4));
- vst1q_u8(dst + 9 * stride + 0, vextq_u8(d1_lo, d1_hi, 4));
- vst1q_u8(dst + 9 * stride + 16, vextq_u8(d1_hi, a31, 4));
- vst1q_u8(dst + 10 * stride + 0, vextq_u8(d0_lo, d0_hi, 5));
- vst1q_u8(dst + 10 * stride + 16, vextq_u8(d0_hi, a31, 5));
- vst1q_u8(dst + 11 * stride + 0, vextq_u8(d1_lo, d1_hi, 5));
- vst1q_u8(dst + 11 * stride + 16, vextq_u8(d1_hi, a31, 5));
- vst1q_u8(dst + 12 * stride + 0, vextq_u8(d0_lo, d0_hi, 6));
- vst1q_u8(dst + 12 * stride + 16, vextq_u8(d0_hi, a31, 6));
- vst1q_u8(dst + 13 * stride + 0, vextq_u8(d1_lo, d1_hi, 6));
- vst1q_u8(dst + 13 * stride + 16, vextq_u8(d1_hi, a31, 6));
- vst1q_u8(dst + 14 * stride + 0, vextq_u8(d0_lo, d0_hi, 7));
- vst1q_u8(dst + 14 * stride + 16, vextq_u8(d0_hi, a31, 7));
- vst1q_u8(dst + 15 * stride + 0, vextq_u8(d1_lo, d1_hi, 7));
- vst1q_u8(dst + 15 * stride + 16, vextq_u8(d1_hi, a31, 7));
- vst1q_u8(dst + 16 * stride + 0, vextq_u8(d0_lo, d0_hi, 8));
- vst1q_u8(dst + 16 * stride + 16, vextq_u8(d0_hi, a31, 8));
- vst1q_u8(dst + 17 * stride + 0, vextq_u8(d1_lo, d1_hi, 8));
- vst1q_u8(dst + 17 * stride + 16, vextq_u8(d1_hi, a31, 8));
- vst1q_u8(dst + 18 * stride + 0, vextq_u8(d0_lo, d0_hi, 9));
- vst1q_u8(dst + 18 * stride + 16, vextq_u8(d0_hi, a31, 9));
- vst1q_u8(dst + 19 * stride + 0, vextq_u8(d1_lo, d1_hi, 9));
- vst1q_u8(dst + 19 * stride + 16, vextq_u8(d1_hi, a31, 9));
- vst1q_u8(dst + 20 * stride + 0, vextq_u8(d0_lo, d0_hi, 10));
- vst1q_u8(dst + 20 * stride + 16, vextq_u8(d0_hi, a31, 10));
- vst1q_u8(dst + 21 * stride + 0, vextq_u8(d1_lo, d1_hi, 10));
- vst1q_u8(dst + 21 * stride + 16, vextq_u8(d1_hi, a31, 10));
- vst1q_u8(dst + 22 * stride + 0, vextq_u8(d0_lo, d0_hi, 11));
- vst1q_u8(dst + 22 * stride + 16, vextq_u8(d0_hi, a31, 11));
- vst1q_u8(dst + 23 * stride + 0, vextq_u8(d1_lo, d1_hi, 11));
- vst1q_u8(dst + 23 * stride + 16, vextq_u8(d1_hi, a31, 11));
- vst1q_u8(dst + 24 * stride + 0, vextq_u8(d0_lo, d0_hi, 12));
- vst1q_u8(dst + 24 * stride + 16, vextq_u8(d0_hi, a31, 12));
- vst1q_u8(dst + 25 * stride + 0, vextq_u8(d1_lo, d1_hi, 12));
- vst1q_u8(dst + 25 * stride + 16, vextq_u8(d1_hi, a31, 12));
- vst1q_u8(dst + 26 * stride + 0, vextq_u8(d0_lo, d0_hi, 13));
- vst1q_u8(dst + 26 * stride + 16, vextq_u8(d0_hi, a31, 13));
- vst1q_u8(dst + 27 * stride + 0, vextq_u8(d1_lo, d1_hi, 13));
- vst1q_u8(dst + 27 * stride + 16, vextq_u8(d1_hi, a31, 13));
- vst1q_u8(dst + 28 * stride + 0, vextq_u8(d0_lo, d0_hi, 14));
- vst1q_u8(dst + 28 * stride + 16, vextq_u8(d0_hi, a31, 14));
- vst1q_u8(dst + 29 * stride + 0, vextq_u8(d1_lo, d1_hi, 14));
- vst1q_u8(dst + 29 * stride + 16, vextq_u8(d1_hi, a31, 14));
- vst1q_u8(dst + 30 * stride + 0, vextq_u8(d0_lo, d0_hi, 15));
- vst1q_u8(dst + 30 * stride + 16, vextq_u8(d0_hi, a31, 15));
- vst1q_u8(dst + 31 * stride + 0, vextq_u8(d1_lo, d1_hi, 15));
- vst1q_u8(dst + 31 * stride + 16, vextq_u8(d1_hi, a31, 15));
+
+ d0_hi = vextq_u8(d0_lo, d0_hi, 15);
+ d0_lo = vextq_u8(d0_lo, d0_lo, 15);
+ d1_hi = vextq_u8(d1_lo, d1_hi, 15);
+ d1_lo = vextq_u8(d1_lo, d1_lo, 15);
+
+ vst1q_u8(dst + 2 * stride + 0, vextq_u8(d0_lo, d0_hi, 2));
+ vst1q_u8(dst + 2 * stride + 16, vextq_u8(d0_hi, a31, 2));
+ vst1q_u8(dst + 3 * stride + 0, vextq_u8(d1_lo, d1_hi, 2));
+ vst1q_u8(dst + 3 * stride + 16, vextq_u8(d1_hi, a31, 2));
+ vst1q_u8(dst + 4 * stride + 0, vextq_u8(d0_lo, d0_hi, 3));
+ vst1q_u8(dst + 4 * stride + 16, vextq_u8(d0_hi, a31, 3));
+ vst1q_u8(dst + 5 * stride + 0, vextq_u8(d1_lo, d1_hi, 3));
+ vst1q_u8(dst + 5 * stride + 16, vextq_u8(d1_hi, a31, 3));
+ vst1q_u8(dst + 6 * stride + 0, vextq_u8(d0_lo, d0_hi, 4));
+ vst1q_u8(dst + 6 * stride + 16, vextq_u8(d0_hi, a31, 4));
+ vst1q_u8(dst + 7 * stride + 0, vextq_u8(d1_lo, d1_hi, 4));
+ vst1q_u8(dst + 7 * stride + 16, vextq_u8(d1_hi, a31, 4));
+ vst1q_u8(dst + 8 * stride + 0, vextq_u8(d0_lo, d0_hi, 5));
+ vst1q_u8(dst + 8 * stride + 16, vextq_u8(d0_hi, a31, 5));
+ vst1q_u8(dst + 9 * stride + 0, vextq_u8(d1_lo, d1_hi, 5));
+ vst1q_u8(dst + 9 * stride + 16, vextq_u8(d1_hi, a31, 5));
+ vst1q_u8(dst + 10 * stride + 0, vextq_u8(d0_lo, d0_hi, 6));
+ vst1q_u8(dst + 10 * stride + 16, vextq_u8(d0_hi, a31, 6));
+ vst1q_u8(dst + 11 * stride + 0, vextq_u8(d1_lo, d1_hi, 6));
+ vst1q_u8(dst + 11 * stride + 16, vextq_u8(d1_hi, a31, 6));
+ vst1q_u8(dst + 12 * stride + 0, vextq_u8(d0_lo, d0_hi, 7));
+ vst1q_u8(dst + 12 * stride + 16, vextq_u8(d0_hi, a31, 7));
+ vst1q_u8(dst + 13 * stride + 0, vextq_u8(d1_lo, d1_hi, 7));
+ vst1q_u8(dst + 13 * stride + 16, vextq_u8(d1_hi, a31, 7));
+ vst1q_u8(dst + 14 * stride + 0, vextq_u8(d0_lo, d0_hi, 8));
+ vst1q_u8(dst + 14 * stride + 16, vextq_u8(d0_hi, a31, 8));
+ vst1q_u8(dst + 15 * stride + 0, vextq_u8(d1_lo, d1_hi, 8));
+ vst1q_u8(dst + 15 * stride + 16, vextq_u8(d1_hi, a31, 8));
+ vst1q_u8(dst + 16 * stride + 0, vextq_u8(d0_lo, d0_hi, 9));
+ vst1q_u8(dst + 16 * stride + 16, vextq_u8(d0_hi, a31, 9));
+ vst1q_u8(dst + 17 * stride + 0, vextq_u8(d1_lo, d1_hi, 9));
+ vst1q_u8(dst + 17 * stride + 16, vextq_u8(d1_hi, a31, 9));
+ vst1q_u8(dst + 18 * stride + 0, vextq_u8(d0_lo, d0_hi, 10));
+ vst1q_u8(dst + 18 * stride + 16, vextq_u8(d0_hi, a31, 10));
+ vst1q_u8(dst + 19 * stride + 0, vextq_u8(d1_lo, d1_hi, 10));
+ vst1q_u8(dst + 19 * stride + 16, vextq_u8(d1_hi, a31, 10));
+ vst1q_u8(dst + 20 * stride + 0, vextq_u8(d0_lo, d0_hi, 11));
+ vst1q_u8(dst + 20 * stride + 16, vextq_u8(d0_hi, a31, 11));
+ vst1q_u8(dst + 21 * stride + 0, vextq_u8(d1_lo, d1_hi, 11));
+ vst1q_u8(dst + 21 * stride + 16, vextq_u8(d1_hi, a31, 11));
+ vst1q_u8(dst + 22 * stride + 0, vextq_u8(d0_lo, d0_hi, 12));
+ vst1q_u8(dst + 22 * stride + 16, vextq_u8(d0_hi, a31, 12));
+ vst1q_u8(dst + 23 * stride + 0, vextq_u8(d1_lo, d1_hi, 12));
+ vst1q_u8(dst + 23 * stride + 16, vextq_u8(d1_hi, a31, 12));
+ vst1q_u8(dst + 24 * stride + 0, vextq_u8(d0_lo, d0_hi, 13));
+ vst1q_u8(dst + 24 * stride + 16, vextq_u8(d0_hi, a31, 13));
+ vst1q_u8(dst + 25 * stride + 0, vextq_u8(d1_lo, d1_hi, 13));
+ vst1q_u8(dst + 25 * stride + 16, vextq_u8(d1_hi, a31, 13));
+ vst1q_u8(dst + 26 * stride + 0, vextq_u8(d0_lo, d0_hi, 14));
+ vst1q_u8(dst + 26 * stride + 16, vextq_u8(d0_hi, a31, 14));
+ vst1q_u8(dst + 27 * stride + 0, vextq_u8(d1_lo, d1_hi, 14));
+ vst1q_u8(dst + 27 * stride + 16, vextq_u8(d1_hi, a31, 14));
+ vst1q_u8(dst + 28 * stride + 0, vextq_u8(d0_lo, d0_hi, 15));
+ vst1q_u8(dst + 28 * stride + 16, vextq_u8(d0_hi, a31, 15));
+ vst1q_u8(dst + 29 * stride + 0, vextq_u8(d1_lo, d1_hi, 15));
+ vst1q_u8(dst + 29 * stride + 16, vextq_u8(d1_hi, a31, 15));
+ vst1q_u8(dst + 30 * stride + 0, d0_hi);
+ vst1q_u8(dst + 30 * stride + 16, a31);
+ vst1q_u8(dst + 31 * stride + 0, d1_hi);
+ vst1q_u8(dst + 31 * stride + 16, a31);
}
// -----------------------------------------------------------------------------
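The d63 hunks above follow the same idea: the trailing lane of d0/d1 is filtered from samples past the block edge (the d0/d1 computation is earlier in each function, not shown in these hunks), so instead of letting it reach the output, the new code rotates d0/d1 right by one lane and bumps every ext offset by one, substituting the duplicated above-right sample. A lane-level sketch of the effect for one 8-wide row (illustrative names, not library code):

#include <arm_neon.h>
#include <stdint.h>

// d0 holds the filtered samples; a7 holds above[7] in every lane.
static void d63_row2_lanes(uint8x8_t d0, uint8x8_t a7, uint8_t *old_row,
                           uint8_t *new_row) {
  // Rotate d0 right by one lane so the ext below skips its trailing lane.
  const uint8x8_t d0_rot = vext_u8(d0, d0, 7);
  // Old layout: [ d0[1], ..., d0[7], above[7] ] -- d0[7] was only correct
  // when the above tail duplicated above[7].
  vst1_u8(old_row, vext_u8(d0, a7, 1));
  // New layout: [ d0[1], ..., d0[6], above[7], above[7] ] -- nothing past
  // the block edge reaches the output.
  vst1_u8(new_row, vext_u8(d0_rot, a7, 2));
}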