about summary refs log tree commit diff
diff options
context:
space:
mode:
authorRichard Earnshaw <rearnsha@arm.com>2014-11-05 13:51:56 +0000
committerMarcus Shawcroft <marcus.shawcroft@arm.com>2014-11-05 13:51:56 +0000
commitbe9d4ccc7fe62751db1a5fdcb31958561dbbda9a (patch)
tree7a4140cf429ff51370f03fae459f8055701baef2
parent7110166d4fe36899c5ecf6fd31635f3cd75d8931 (diff)
downloadglibc-be9d4ccc7fe62751db1a5fdcb31958561dbbda9a.tar
glibc-be9d4ccc7fe62751db1a5fdcb31958561dbbda9a.tar.gz
glibc-be9d4ccc7fe62751db1a5fdcb31958561dbbda9a.tar.bz2
glibc-be9d4ccc7fe62751db1a5fdcb31958561dbbda9a.zip
[AArch64] Add optimized strchrnul.
Here is an optimized implementation of __strchrnul. The simplification that we don't have to track precisely why the loop terminates (match or end-of-string) means we have to do less work in both setup and the core inner loop. That means this should never be slower than strchr. As with strchr, the use of LD1 means we do not need different versions for big-/little-endian.
-rw-r--r--ChangeLog4
-rw-r--r--NEWS2
-rw-r--r--sysdeps/aarch64/strchrnul.S130
3 files changed, 136 insertions, 0 deletions
diff --git a/ChangeLog b/ChangeLog
index 0929cb6a24..51469e1056 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2014-11-05 Richard Earnshaw <rearnsha@arm.com>
+
+ * sysdeps/aarch64/strchrnul.S: New file.
+
2014-11-05 Adhemerval Zanella <azanella@linux.vnet.ibm.com>
* sysdeps/powerpc/powerpc64/power8/memset.S (MTVSRD_V1_R4): Simplify
diff --git a/NEWS b/NEWS
index 382d38dbdf..4656531663 100644
--- a/NEWS
+++ b/NEWS
@@ -32,6 +32,8 @@ Version 2.20
17084, 17086, 17088, 17092, 17097, 17125, 17135, 17137, 17150, 17153,
17187, 17213, 17259, 17261, 17262, 17263, 17319, 17325, 17354.
+* Optimized strchrnul implementation for AArch64. Contributed by ARM Ltd.
+
* Reverted change of ABI data structures for s390 and s390x:
On s390 and s390x the size of struct ucontext and jmp_buf was increased in
2.19. This change is reverted in 2.20. The introduced 2.19 symbol versions
diff --git a/sysdeps/aarch64/strchrnul.S b/sysdeps/aarch64/strchrnul.S
new file mode 100644
index 0000000000..b98c2e951b
--- /dev/null
+++ b/sysdeps/aarch64/strchrnul.S
@@ -0,0 +1,130 @@
+/* strchrnul - find a character or nul in a string
+
+ Copyright (C) 2014 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64
+ * Neon Available.
+ */
+
+/* Arguments and results. */
+#define srcin x0
+#define chrin w1
+
+#define result x0
+
+/* Locals and temporaries. */
+
+#define src x2
+#define tmp1 x3
+#define wtmp2 w4
+#define tmp3 x5
+
+#define vrepchr v0
+#define vdata1 v1
+#define vdata2 v2
+#define vhas_nul1 v3
+#define vhas_nul2 v4
+#define vhas_chr1 v5
+#define vhas_chr2 v6
+#define vrepmask v15
+#define vend1 v16
+
+/* Core algorithm.
+
+ For each 32-byte hunk we calculate a 64-bit syndrome value, with
+ two bits per byte (LSB is always in bits 0 and 1, for both big
+ and little-endian systems). For each tuple, bit 0 is set iff
+ the relevant byte matched the requested character or nul. Since the
+ bits in the syndrome reflect exactly the order in which things occur
+ in the original string a count_trailing_zeros() operation will
+ identify exactly which byte is causing the termination. */
+
+ENTRY (__strchrnul)
+ /* Magic constant 0x40100401 to allow us to identify which lane
+ matches the termination condition. */
+ mov wtmp2, #0x0401
+ movk wtmp2, #0x4010, lsl #16
+ dup vrepchr.16b, chrin
+ bic src, srcin, #31 /* Work with aligned 32-byte hunks. */
+ dup vrepmask.4s, wtmp2
+ ands tmp1, srcin, #31
+ b.eq L(loop)
+
+ /* Input string is not 32-byte aligned. Rather than forcing
+ the padding bytes to a safe value, we calculate the syndrome
+ for all the bytes, but then mask off those bits of the
+ syndrome that are related to the padding. */
+ ld1 {vdata1.16b, vdata2.16b}, [src], #32
+ neg tmp1, tmp1
+ cmeq vhas_nul1.16b, vdata1.16b, #0
+ cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
+ cmeq vhas_nul2.16b, vdata2.16b, #0
+ cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
+ orr vhas_chr1.16b, vhas_chr1.16b, vhas_nul1.16b
+ orr vhas_chr2.16b, vhas_chr2.16b, vhas_nul2.16b
+ and vhas_chr1.16b, vhas_chr1.16b, vrepmask.16b
+ and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
+ lsl tmp1, tmp1, #1
+ addp vend1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
+ mov tmp3, #~0
+ addp vend1.16b, vend1.16b, vend1.16b // 128->64
+ lsr tmp1, tmp3, tmp1
+
+ mov tmp3, vend1.2d[0]
+ bic tmp1, tmp3, tmp1 // Mask padding bits.
+ cbnz tmp1, L(tail)
+
+L(loop):
+ ld1 {vdata1.16b, vdata2.16b}, [src], #32
+ cmeq vhas_nul1.16b, vdata1.16b, #0
+ cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
+ cmeq vhas_nul2.16b, vdata2.16b, #0
+ cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
+ /* Use a fast check for the termination condition. */
+ orr vhas_chr1.16b, vhas_nul1.16b, vhas_chr1.16b
+ orr vhas_chr2.16b, vhas_nul2.16b, vhas_chr2.16b
+ orr vend1.16b, vhas_chr1.16b, vhas_chr2.16b
+ addp vend1.2d, vend1.2d, vend1.2d
+ mov tmp1, vend1.2d[0]
+ cbz tmp1, L(loop)
+
+ /* Termination condition found. Now need to establish exactly why
+ we terminated. */
+ and vhas_chr1.16b, vhas_chr1.16b, vrepmask.16b
+ and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
+ addp vend1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
+ addp vend1.16b, vend1.16b, vend1.16b // 128->64
+
+ mov tmp1, vend1.2d[0]
+L(tail):
+ /* Count the trailing zeros, by bit reversing... */
+ rbit tmp1, tmp1
+ /* Re-bias source. */
+ sub src, src, #32
+ clz tmp1, tmp1 /* ... and counting the leading zeros. */
+ /* tmp1 is twice the offset into the fragment. */
+ add result, src, tmp1, lsr #1
+ ret
+
+END(__strchrnul)
+weak_alias (__strchrnul, strchrnul)