author	Wilco Dijkstra <wilco.dijkstra@arm.com>	2023-01-11 13:51:48 +0000
committer	Wilco Dijkstra <wilco.dijkstra@arm.com>	2023-01-17 15:09:18 +0000
commit	1bbb1a2022e126f21810d3d0ebe0a975d5243e43
tree	2e68581e051a3cd38389e118b9f1e5f4d26c7b6c
parent	00776241776e67fc666b896c1e85770f4f3ec1e1
AArch64: Improve strlen_asimd
Use shrn for the mask, merge tst+bne into cbnz, and tweak code
alignment.  Performance improves slightly as a result.

Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
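The headline change replaces the endian-dependent bic/umaxp mask reduction with a single shrn, which shifts each 16-bit lane of the cmeq result right by four and narrows it, packing the 128-bit NUL mask into a 64-bit syndrome with one nibble per input byte. A minimal standalone sketch of the trick, assuming little-endian and a caller that guarantees 16 readable bytes (the function name is illustrative, not part of the patch):

	.text
	.global	first_nul_16
	.type	first_nul_16, %function
/* Sketch only: return the index of the first NUL byte in a 16-byte
   chunk at x0, or 16 if there is none.  */
first_nul_16:
	ld1	{v0.16b}, [x0]		/* Load the chunk.  */
	cmeq	v0.16b, v0.16b, 0	/* 0xff in each byte that is NUL.  */
	shrn	v0.8b, v0.8h, 4		/* Pack to one nibble per byte.  */
	fmov	x0, d0			/* 64-bit syndrome into a GPR.  */
	rbit	x0, x0			/* Reverse so clz finds the lowest set nibble.  */
	clz	x0, x0			/* Leading zeros = 4 * byte index.  */
	lsr	x0, x0, 2		/* Four syndrome bits per byte.  */
	ret
	.size	first_nul_16, . - first_nul_16

Because the syndrome carries four bits per byte, the final shift is lsr 2 rather than lsr 3, matching the add len, len, tmp, lsr 2 line in the loop hunk below; the #ifdef __AARCH64EB__ masking deleted by the patch becomes unnecessary since shrn produces the nibbles directly.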
Diffstat (limited to 'sysdeps')
 sysdeps/aarch64/multiarch/strlen_asimd.S | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)
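Alongside the shrn change, the tst+b.ne pair becomes a single cbnz: the new syndw define (w3) is the 32-bit view of synd (x3), so a compare-and-branch on the w-register tests exactly the low 32 bits that tst synd, 0xffffffff selected, saving an instruction and the flags dependency. A hedged standalone sketch (illustrative function name, not part of the patch):

	.text
	.global	synd_low32_nonzero
	.type	synd_low32_nonzero, %function
/* Sketch only: return 1 if the low 32 bits of x0 are non-zero, else 0.  */
synd_low32_nonzero:
	cbnz	w0, 1f		/* Replaces: tst x0, 0xffffffff; b.ne 1f.  */
	mov	x0, 0
	ret
1:
	mov	x0, 1
	ret
	.size	synd_low32_nonzero, . - synd_low32_nonzero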
diff --git a/sysdeps/aarch64/multiarch/strlen_asimd.S b/sysdeps/aarch64/multiarch/strlen_asimd.S
index ca6ab96ecf..490439491d 100644
--- a/sysdeps/aarch64/multiarch/strlen_asimd.S
+++ b/sysdeps/aarch64/multiarch/strlen_asimd.S
@@ -48,6 +48,7 @@
 #define tmp	x2
 #define tmpw	w2
 #define synd	x3
+#define syndw	w3
 #define shift	x4
 
 /* For the first 32 bytes, NUL detection works on the principle that
@@ -87,7 +88,6 @@
 ENTRY (__strlen_asimd)
 	PTR_ARG (0)
-
 	and	tmp1, srcin, MIN_PAGE_SIZE - 1
 	cmp	tmp1, MIN_PAGE_SIZE - 32
 	b.hi	L(page_cross)
@@ -123,7 +123,6 @@ ENTRY (__strlen_asimd)
 	add	len, len, tmp1, lsr 3
 	ret
-	.p2align 3
 	/* Look for a NUL byte at offset 16..31 in the string.  */
 L(bytes16_31):
 	ldp	data1, data2, [srcin, 16]
@@ -151,6 +150,7 @@ L(bytes16_31):
 	add	len, len, tmp1, lsr 3
 	ret
+	nop
 L(loop_entry):
 	bic	src, srcin, 31
@@ -166,18 +166,12 @@ L(loop):
 	/* Low 32 bits of synd are non-zero if a NUL was found in datav1.  */
 	cmeq	maskv.16b, datav1.16b, 0
 	sub	len, src, srcin
-	tst	synd, 0xffffffff
-	b.ne	1f
+	cbnz	syndw, 1f
 	cmeq	maskv.16b, datav2.16b, 0
 	add	len, len, 16
 1:
 	/* Generate a bitmask and compute correct byte offset.  */
-#ifdef __AARCH64EB__
-	bic	maskv.8h, 0xf0
-#else
-	bic	maskv.8h, 0x0f, lsl 8
-#endif
-	umaxp	maskv.16b, maskv.16b, maskv.16b
+	shrn	maskv.8b, maskv.8h, 4
 	fmov	synd, maskd
 #ifndef __AARCH64EB__
 	rbit	synd, synd
@@ -186,8 +180,6 @@ L(loop):
 	add	len, len, tmp, lsr 2
 	ret
-	.p2align 4
-
 L(page_cross):
 	bic	src, srcin, 31
 	mov	tmpw, 0x0c03