author     James Zern <jzern@google.com>  2016-11-29 19:47:50 -0800
committer  James Zern <jzern@google.com>  2016-12-06 15:13:33 -0800
commit     af9d7aa9fb81e722a719efd8c2fea7a7a80382e9
tree       8ac08caf731d6ec53f81f590be8475cfe56768cb
parent     c6641782c3ea2e12417c04ac2b1b09f6a82caf88
idct16x16,NEON: rm output_stride from pass1 fns
vpx_idct16x16_256_add_neon_pass1, vpx_idct16x16_10_add_neon_pass1: the stride
was a constant 8 in all cases, meaning the results are stored contiguously;
this allows the number of stores to be reduced.

Change-Id: I7858a0a15a284883ef45c13dfd97c308df9ea09e
-rw-r--r--  vpx_dsp/arm/idct16x16_add_neon.asm |  49
-rw-r--r--  vpx_dsp/arm/idct16x16_add_neon.c   | 138
-rw-r--r--  vpx_dsp/arm/idct16x16_neon.c       |  20
3 files changed, 52 insertions(+), 155 deletions(-)
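
For illustration, a minimal NEON intrinsics sketch of the store pattern before
and after this change (the store_q_* helpers are hypothetical, written only for
this note, and are not libvpx API). Because the stride was always 8 bytes, the
exact width of one 64-bit store, the rows already land back to back in memory,
so each pair of strided 64-bit stores folds into a single contiguous 128-bit
store, halving the number of store instructions.

#include <arm_neon.h>
#include <stdint.h>

/* Before: each 128-bit result register is written as two strided 64-bit
 * stores.  In practice stride_el was 4 int16_t (8 bytes), so the data was
 * already contiguous. */
static void store_q_strided(int16_t **out, int16x8_t q, int stride_el) {
  vst1_u64((uint64_t *)*out, vreinterpret_u64_s16(vget_low_s16(q)));
  *out += stride_el;
  vst1_u64((uint64_t *)*out, vreinterpret_u64_s16(vget_high_s16(q)));
  *out += stride_el;
}

/* After: one contiguous 128-bit store per register, half as many stores. */
static void store_q_contiguous(int16_t **out, int16x8_t q) {
  vst1q_s16(*out, q);
  *out += 8;
}
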
diff --git a/vpx_dsp/arm/idct16x16_add_neon.asm b/vpx_dsp/arm/idct16x16_add_neon.asm
index 2b41d62bb..e7a5e1012 100644
--- a/vpx_dsp/arm/idct16x16_add_neon.asm
+++ b/vpx_dsp/arm/idct16x16_add_neon.asm
@@ -36,12 +36,10 @@
MEND
AREA Block, CODE, READONLY ; name this block of code
-;void |vpx_idct16x16_256_add_neon_pass1|(const int16_t *input,
-; int16_t *output, int output_stride)
+;void |vpx_idct16x16_256_add_neon_pass1|(const int16_t *input, int16_t *output)
;
; r0 const int16_t *input
; r1 int16_t *output
-; r2 int output_stride
; idct16 stage1 - stage6 on all the elements loaded in q8-q15. The output
; will be stored back into q8-q15 registers. This function will touch q0-q7
@@ -247,22 +245,10 @@
vsub.s16 q15, q0, q15 ; step2[7] = step1[0] - step1[7];
; store the data
- vst1.64 {d16}, [r1], r2
- vst1.64 {d17}, [r1], r2
- vst1.64 {d18}, [r1], r2
- vst1.64 {d19}, [r1], r2
- vst1.64 {d20}, [r1], r2
- vst1.64 {d21}, [r1], r2
- vst1.64 {d22}, [r1], r2
- vst1.64 {d23}, [r1], r2
- vst1.64 {d24}, [r1], r2
- vst1.64 {d25}, [r1], r2
- vst1.64 {d26}, [r1], r2
- vst1.64 {d27}, [r1], r2
- vst1.64 {d28}, [r1], r2
- vst1.64 {d29}, [r1], r2
- vst1.64 {d30}, [r1], r2
- vst1.64 {d31}, [r1], r2
+ vst1.64 {q8-q9}, [r1]!
+ vst1.64 {q10-q11}, [r1]!
+ vst1.64 {q12-q13}, [r1]!
+ vst1.64 {q14-q15}, [r1]
bx lr
ENDP ; |vpx_idct16x16_256_add_neon_pass1|
@@ -767,12 +753,10 @@ end_idct16x16_pass2
bx lr
ENDP ; |vpx_idct16x16_256_add_neon_pass2|
-;void |vpx_idct16x16_10_add_neon_pass1|(const int16_t *input,
-; int16_t *output, int output_stride)
+;void |vpx_idct16x16_10_add_neon_pass1|(const int16_t *input, int16_t *output)
;
; r0 const int16_t *input
; r1 int16_t *output
-; r2 int output_stride
; idct16 stage1 - stage6 on all the elements loaded in q8-q15. The output
; will be stored back into q8-q15 registers. This function will touch q0-q7
@@ -864,22 +848,11 @@ end_idct16x16_pass2
vsub.s16 q15, q8, q7 ; step2[7] = step1[0] - step1[7];
; store the data
- vst1.64 {d4}, [r1], r2
- vst1.64 {d5}, [r1], r2
- vst1.64 {d18}, [r1], r2
- vst1.64 {d19}, [r1], r2
- vst1.64 {d20}, [r1], r2
- vst1.64 {d21}, [r1], r2
- vst1.64 {d22}, [r1], r2
- vst1.64 {d23}, [r1], r2
- vst1.64 {d24}, [r1], r2
- vst1.64 {d25}, [r1], r2
- vst1.64 {d26}, [r1], r2
- vst1.64 {d27}, [r1], r2
- vst1.64 {d28}, [r1], r2
- vst1.64 {d29}, [r1], r2
- vst1.64 {d30}, [r1], r2
- vst1.64 {d31}, [r1], r2
+ vst1.64 {q2}, [r1]!
+ vst1.64 {q9-q10}, [r1]!
+ vst1.64 {q11-q12}, [r1]!
+ vst1.64 {q13-q14}, [r1]!
+ vst1.64 {q15}, [r1]
bx lr
ENDP ; |vpx_idct16x16_10_add_neon_pass1|
diff --git a/vpx_dsp/arm/idct16x16_add_neon.c b/vpx_dsp/arm/idct16x16_add_neon.c
index 5fab91c11..3e11159e2 100644
--- a/vpx_dsp/arm/idct16x16_add_neon.c
+++ b/vpx_dsp/arm/idct16x16_add_neon.c
@@ -14,14 +14,11 @@
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/txfm_common.h"
-void vpx_idct16x16_256_add_neon_pass1(const int16_t *in, int16_t *out,
- int output_stride) {
+void vpx_idct16x16_256_add_neon_pass1(const int16_t *in, int16_t *out) {
int16x4_t d0s16, d1s16, d2s16, d3s16;
int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
- uint64x1_t d16u64, d17u64, d18u64, d19u64, d20u64, d21u64, d22u64, d23u64;
- uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64;
int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
int32x4_t q0s32, q1s32, q2s32, q3s32, q5s32, q6s32, q9s32;
@@ -197,56 +194,22 @@ void vpx_idct16x16_256_add_neon_pass1(const int16_t *in, int16_t *out,
q14s16 = vsubq_s16(q1s16, q6s16);
q15s16 = vsubq_s16(q0s16, q15s16);
- d16u64 = vreinterpret_u64_s16(vget_low_s16(q8s16));
- d17u64 = vreinterpret_u64_s16(vget_high_s16(q8s16));
- d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16));
- d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16));
- d20u64 = vreinterpret_u64_s16(vget_low_s16(q10s16));
- d21u64 = vreinterpret_u64_s16(vget_high_s16(q10s16));
- d22u64 = vreinterpret_u64_s16(vget_low_s16(q11s16));
- d23u64 = vreinterpret_u64_s16(vget_high_s16(q11s16));
- d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
- d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
- d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
- d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
- d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16));
- d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16));
- d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16));
- d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16));
-
// store the data
- output_stride >>= 1; // output_stride / 2, out is int16_t
- vst1_u64((uint64_t *)out, d16u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d17u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d18u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d19u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d20u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d21u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d22u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d23u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d24u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d25u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d26u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d27u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d28u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d29u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d30u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d31u64);
+ vst1q_s16(out, q8s16);
+ out += 8;
+ vst1q_s16(out, q9s16);
+ out += 8;
+ vst1q_s16(out, q10s16);
+ out += 8;
+ vst1q_s16(out, q11s16);
+ out += 8;
+ vst1q_s16(out, q12s16);
+ out += 8;
+ vst1q_s16(out, q13s16);
+ out += 8;
+ vst1q_s16(out, q14s16);
+ out += 8;
+ vst1q_s16(out, q15s16);
}
void vpx_idct16x16_256_add_neon_pass2(const int16_t *src, int16_t *out,
@@ -798,12 +761,9 @@ void vpx_idct16x16_256_add_neon_pass2(const int16_t *src, int16_t *out,
}
}
-void vpx_idct16x16_10_add_neon_pass1(const int16_t *in, int16_t *out,
- int output_stride) {
+void vpx_idct16x16_10_add_neon_pass1(const int16_t *in, int16_t *out) {
int16x4_t d4s16;
int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
- uint64x1_t d4u64, d5u64, d18u64, d19u64, d20u64, d21u64, d22u64, d23u64;
- uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64;
int16x8_t q0s16, q1s16, q2s16, q4s16, q5s16, q6s16, q7s16;
int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
int32x4_t q6s32, q9s32;
@@ -881,56 +841,22 @@ void vpx_idct16x16_10_add_neon_pass1(const int16_t *in, int16_t *out,
q14s16 = vsubq_s16(q8s16, q6s16);
q15s16 = vsubq_s16(q8s16, q7s16);
- d4u64 = vreinterpret_u64_s16(vget_low_s16(q2s16));
- d5u64 = vreinterpret_u64_s16(vget_high_s16(q2s16));
- d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16));
- d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16));
- d20u64 = vreinterpret_u64_s16(vget_low_s16(q10s16));
- d21u64 = vreinterpret_u64_s16(vget_high_s16(q10s16));
- d22u64 = vreinterpret_u64_s16(vget_low_s16(q11s16));
- d23u64 = vreinterpret_u64_s16(vget_high_s16(q11s16));
- d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16));
- d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16));
- d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16));
- d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16));
- d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16));
- d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16));
- d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16));
- d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16));
-
// store the data
- output_stride >>= 1; // output_stride / 2, out is int16_t
- vst1_u64((uint64_t *)out, d4u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d5u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d18u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d19u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d20u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d21u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d22u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d23u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d24u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d25u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d26u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d27u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d28u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d29u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d30u64);
- out += output_stride;
- vst1_u64((uint64_t *)out, d31u64);
+ vst1q_s16(out, q2s16);
+ out += 8;
+ vst1q_s16(out, q9s16);
+ out += 8;
+ vst1q_s16(out, q10s16);
+ out += 8;
+ vst1q_s16(out, q11s16);
+ out += 8;
+ vst1q_s16(out, q12s16);
+ out += 8;
+ vst1q_s16(out, q13s16);
+ out += 8;
+ vst1q_s16(out, q14s16);
+ out += 8;
+ vst1q_s16(out, q15s16);
}
void vpx_idct16x16_10_add_neon_pass2(const int16_t *src, int16_t *out,
diff --git a/vpx_dsp/arm/idct16x16_neon.c b/vpx_dsp/arm/idct16x16_neon.c
index f8af61aa4..141b8d27e 100644
--- a/vpx_dsp/arm/idct16x16_neon.c
+++ b/vpx_dsp/arm/idct16x16_neon.c
@@ -11,14 +11,12 @@
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/vpx_dsp_common.h"
-void vpx_idct16x16_256_add_neon_pass1(const int16_t *input, int16_t *output,
- int output_stride);
+void vpx_idct16x16_256_add_neon_pass1(const int16_t *input, int16_t *output);
void vpx_idct16x16_256_add_neon_pass2(const int16_t *src, int16_t *output,
int16_t *pass1_output,
int16_t skip_adding, uint8_t *dest,
int dest_stride);
-void vpx_idct16x16_10_add_neon_pass1(const int16_t *input, int16_t *output,
- int output_stride);
+void vpx_idct16x16_10_add_neon_pass1(const int16_t *input, int16_t *output);
void vpx_idct16x16_10_add_neon_pass2(const int16_t *src, int16_t *output,
int16_t *pass1_output);
@@ -44,7 +42,7 @@ void vpx_idct16x16_256_add_neon(const int16_t *input, uint8_t *dest,
/* Parallel idct on the upper 8 rows */
// First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
// stage 6 result in pass1_output.
- vpx_idct16x16_256_add_neon_pass1(input, pass1_output, 8);
+ vpx_idct16x16_256_add_neon_pass1(input, pass1_output);
// Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
// with result in pass1(pass1_output) to calculate final result in stage 7
@@ -55,7 +53,7 @@ void vpx_idct16x16_256_add_neon(const int16_t *input, uint8_t *dest,
/* Parallel idct on the lower 8 rows */
// First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
// stage 6 result in pass1_output.
- vpx_idct16x16_256_add_neon_pass1(input + 8 * 16, pass1_output, 8);
+ vpx_idct16x16_256_add_neon_pass1(input + 8 * 16, pass1_output);
// Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
// with result in pass1(pass1_output) to calculate final result in stage 7
@@ -66,7 +64,7 @@ void vpx_idct16x16_256_add_neon(const int16_t *input, uint8_t *dest,
/* Parallel idct on the left 8 columns */
// First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
// stage 6 result in pass1_output.
- vpx_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8);
+ vpx_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output);
// Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
// with result in pass1(pass1_output) to calculate final result in stage 7.
@@ -77,7 +75,7 @@ void vpx_idct16x16_256_add_neon(const int16_t *input, uint8_t *dest,
/* Parallel idct on the right 8 columns */
// First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
// stage 6 result in pass1_output.
- vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8);
+ vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output);
// Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
// with result in pass1(pass1_output) to calculate final result in stage 7.
@@ -108,7 +106,7 @@ void vpx_idct16x16_10_add_neon(const int16_t *input, uint8_t *dest,
/* Parallel idct on the upper 8 rows */
// First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
// stage 6 result in pass1_output.
- vpx_idct16x16_10_add_neon_pass1(input, pass1_output, 8);
+ vpx_idct16x16_10_add_neon_pass1(input, pass1_output);
// Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
// with result in pass1(pass1_output) to calculate final result in stage 7
@@ -120,7 +118,7 @@ void vpx_idct16x16_10_add_neon(const int16_t *input, uint8_t *dest,
/* Parallel idct on the left 8 columns */
// First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
// stage 6 result in pass1_output.
- vpx_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8);
+ vpx_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output);
// Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
// with result in pass1(pass1_output) to calculate final result in stage 7.
@@ -131,7 +129,7 @@ void vpx_idct16x16_10_add_neon(const int16_t *input, uint8_t *dest,
/* Parallel idct on the right 8 columns */
// First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the
// stage 6 result in pass1_output.
- vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8);
+ vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output);
// Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines
// with result in pass1(pass1_output) to calculate final result in stage 7.
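
With the stride parameter gone, a caller only needs a contiguous scratch buffer
for the pass1 output. A minimal usage sketch (the buffer name and size here are
chosen for illustration only; the real callers are the functions in
idct16x16_neon.c shown above):

#include <stdint.h>

/* Prototype as updated by this change. */
void vpx_idct16x16_256_add_neon_pass1(const int16_t *input, int16_t *output);

static void run_pass1_upper_rows(const int16_t *input /* 16x16 coefficients */) {
  /* pass1 now writes eight contiguous 8-element rows (64 int16_t in total),
   * so no stride bookkeeping is needed on the caller side. */
  int16_t pass1_output[8 * 8];
  vpx_idct16x16_256_add_neon_pass1(input, pass1_output);
}
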