-rw-r--r--  ChangeLog                                        |  15
-rw-r--r--  manual/tunables.texi                             |   2
-rw-r--r--  sysdeps/aarch64/multiarch/Makefile               |   2
-rw-r--r--  sysdeps/aarch64/multiarch/ifunc-impl-list.c      |   3
-rw-r--r--  sysdeps/aarch64/multiarch/memcpy.c               |   7
-rw-r--r--  sysdeps/aarch64/multiarch/memcpy_falkor.S        | 187
-rw-r--r--  sysdeps/unix/sysv/linux/aarch64/cpu-features.c   |   1
-rw-r--r--  sysdeps/unix/sysv/linux/aarch64/cpu-features.h   |   3
8 files changed, 216 insertions, 4 deletions
diff --git a/ChangeLog b/ChangeLog
index 36dd9e6d38..0132300249 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,18 @@
+2017-08-09 Siddhesh Poyarekar <siddhesh@sourceware.org>
+
+ * manual/tunables.texi (Tunable glibc.tune.cpu): Add falkor.
+ * sysdeps/aarch64/multiarch/Makefile (sysdep_routines): Add
+ memcpy_falkor.
+ * sysdeps/aarch64/multiarch/ifunc-impl-list.c (MAX_IFUNC):
+ Bump.
+ (__libc_ifunc_impl_list): Add __memcpy_falkor.
+ * sysdeps/aarch64/multiarch/memcpy.c: Likewise.
+ * sysdeps/aarch64/multiarch/memcpy_falkor.S: New file.
+ * sysdeps/unix/sysv/linux/aarch64/cpu-features.c (cpu_list):
+ Add falkor.
+ * sysdeps/unix/sysv/linux/aarch64/cpu-features.h (IS_FALKOR):
+ New macro.
+
2017-08-08 Adhemerval Zanella <adhemerval.zanella@linaro.org>
[BZ #759]
diff --git a/manual/tunables.texi b/manual/tunables.texi
index 4c658bf3df..3c19567a28 100644
--- a/manual/tunables.texi
+++ b/manual/tunables.texi
@@ -267,7 +267,7 @@ This tunable is specific to i386 and x86-64.
@deftp Tunable glibc.tune.cpu
The @code{glibc.tune.cpu=xxx} tunable allows the user to tell @theglibc{} to
assume that the CPU is @code{xxx} where xxx may have one of these values:
-@code{generic}, @code{thunderxt88}.
+@code{generic}, @code{falkor}, @code{thunderxt88}.
This tunable is specific to aarch64.
@end deftp
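
For example, assuming a glibc built with tunables enabled (the default starting
with 2.26), the selection can be forced at run time through the GLIBC_TUNABLES
environment variable, overriding the MIDR-based detection in cpu-features.c:

  GLIBC_TUNABLES=glibc.tune.cpu=falkor ./myapp

This is handy for comparing __memcpy_falkor against __memcpy_generic on the
same machine.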
diff --git a/sysdeps/aarch64/multiarch/Makefile b/sysdeps/aarch64/multiarch/Makefile
index 78d52c717d..164ba1ae4b 100644
--- a/sysdeps/aarch64/multiarch/Makefile
+++ b/sysdeps/aarch64/multiarch/Makefile
@@ -1,3 +1,3 @@
ifeq ($(subdir),string)
-sysdep_routines += memcpy_generic memcpy_thunderx
+sysdep_routines += memcpy_generic memcpy_thunderx memcpy_falkor
endif
diff --git a/sysdeps/aarch64/multiarch/ifunc-impl-list.c b/sysdeps/aarch64/multiarch/ifunc-impl-list.c
index 32056bcec3..8e873b3d9f 100644
--- a/sysdeps/aarch64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/aarch64/multiarch/ifunc-impl-list.c
@@ -25,7 +25,7 @@
#include <stdio.h>
/* Maximum number of IFUNC implementations. */
-#define MAX_IFUNC 2
+#define MAX_IFUNC 3
size_t
__libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
@@ -40,6 +40,7 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/aarch64/multiarch/memcpy.c and memmove.c. */
IFUNC_IMPL (i, name, memcpy,
IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_thunderx)
+ IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_falkor)
IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_generic))
IFUNC_IMPL (i, name, memmove,
IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_thunderx)
diff --git a/sysdeps/aarch64/multiarch/memcpy.c b/sysdeps/aarch64/multiarch/memcpy.c
index 9f73efbba7..b395df1c63 100644
--- a/sysdeps/aarch64/multiarch/memcpy.c
+++ b/sysdeps/aarch64/multiarch/memcpy.c
@@ -30,9 +30,14 @@ extern __typeof (__redirect_memcpy) __libc_memcpy;
extern __typeof (__redirect_memcpy) __memcpy_generic attribute_hidden;
extern __typeof (__redirect_memcpy) __memcpy_thunderx attribute_hidden;
+extern __typeof (__redirect_memcpy) __memcpy_falkor attribute_hidden;
libc_ifunc (__libc_memcpy,
- IS_THUNDERX (midr) ? __memcpy_thunderx : __memcpy_generic);
+ (IS_THUNDERX (midr)
+ ? __memcpy_thunderx
+ : (IS_FALKOR (midr)
+ ? __memcpy_falkor
+ : __memcpy_generic)));
# undef memcpy
strong_alias (__libc_memcpy, memcpy);
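
For readers unfamiliar with glibc's internal libc_ifunc macro: it expands to a
GNU indirect function whose resolver runs once, at relocation time, and picks
the implementation all later calls will use. Below is a minimal standalone
sketch of the same dispatch pattern using GCC's ifunc attribute directly; the
my_memcpy_* names are hypothetical stand-ins, and the MIDR value is hard-coded
here where glibc actually reads it from cpu_features.

#include <stddef.h>
#include <string.h>

static void *
my_memcpy_generic (void *d, const void *s, size_t n)
{
  return memcpy (d, s, n);
}

static void *
my_memcpy_falkor (void *d, const void *s, size_t n)
{
  return memcpy (d, s, n);   /* stand-in for the tuned routine */
}

/* The resolver runs once, when the symbol is first relocated, and
   returns the address that every later call of my_memcpy will use.  */
static void *(*resolve_my_memcpy (void)) (void *, const void *, size_t)
{
  unsigned long midr = 0x510FC000UL;   /* placeholder; see cpu-features.c */
  unsigned implementer = (midr >> 24) & 0xff;
  unsigned part = (midr >> 4) & 0xfff;

  if (implementer == 'Q' && part == 0xc00)   /* same test as IS_FALKOR */
    return my_memcpy_falkor;
  return my_memcpy_generic;
}

void *my_memcpy (void *, const void *, size_t)
     __attribute__ ((ifunc ("resolve_my_memcpy")));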
diff --git a/sysdeps/aarch64/multiarch/memcpy_falkor.S b/sysdeps/aarch64/multiarch/memcpy_falkor.S
new file mode 100644
index 0000000000..3708281147
--- /dev/null
+++ b/sysdeps/aarch64/multiarch/memcpy_falkor.S
@@ -0,0 +1,187 @@
+/* Optimized memcpy for Qualcomm Falkor processor.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, falkor, unaligned accesses.
+ *
+ */
+
+#define dstin x0
+#define src x1
+#define count x2
+#define dst x3
+#define srcend x4
+#define dstend x5
+#define A_l x6
+#define A_lw w6
+#define A_h x7
+#define A_hw w7
+#define tmp1 x14
+
+/* Copies are split into 3 main cases:
+
+ 1. Small copies of up to 32 bytes
+ 2. Medium copies of 33..128 bytes which are fully unrolled
+ 3. Large copies of more than 128 bytes.
+
+ Large copies align the source to a quad word and use an unrolled loop
+ processing 64 bytes per iteration.
+
+ FALKOR-SPECIFIC DESIGN:
+
+ The smallest copies (32 bytes or less) focus on optimal pipeline usage,
+ which is why the redundant copies of 0-3 bytes have been replaced with
+ conditionals, since the former would unnecessarily break across multiple
+ issue groups. The medium copy group has been enlarged to 128 bytes since
+ bumping the small copies up to 32 bytes allows us to do that without
+ cost and also allows us to reduce the size of the prep code before loop64.
+
+ All copies are done via only two registers, x6 and x7 (A_l/A_h). This is to ensure
+ that all loads hit a single hardware prefetcher which can get correctly
+ trained to prefetch a single stream.
+
+ The non-temporal stores help optimize cache utilization.
+*/
+
+#if IS_IN (libc)
+ENTRY_ALIGN (__memcpy_falkor, 6)
+
+ cmp count, 32
+ add srcend, src, count
+ add dstend, dstin, count
+ b.ls L(copy32)
+ ldp A_l, A_h, [src]
+ cmp count, 128
+ stp A_l, A_h, [dstin]
+ b.hi L(copy_long)
+
+ /* Medium copies: 33..128 bytes. */
+ sub tmp1, count, 1
+ ldp A_l, A_h, [src, 16]
+ stp A_l, A_h, [dstin, 16]
+ tbz tmp1, 6, 1f
+ ldp A_l, A_h, [src, 32]
+ stp A_l, A_h, [dstin, 32]
+ ldp A_l, A_h, [src, 48]
+ stp A_l, A_h, [dstin, 48]
+ ldp A_l, A_h, [srcend, -64]
+ stp A_l, A_h, [dstend, -64]
+ ldp A_l, A_h, [srcend, -48]
+ stp A_l, A_h, [dstend, -48]
+1:
+ ldp A_l, A_h, [srcend, -32]
+ stp A_l, A_h, [dstend, -32]
+ ldp A_l, A_h, [srcend, -16]
+ stp A_l, A_h, [dstend, -16]
+ ret
+
+ .p2align 4
+ /* Small copies: 0..32 bytes. */
+L(copy32):
+ /* 16-32 */
+ cmp count, 16
+ b.lo 1f
+ ldp A_l, A_h, [src]
+ stp A_l, A_h, [dstin]
+ ldp A_l, A_h, [srcend, -16]
+ stp A_l, A_h, [dstend, -16]
+ ret
+ .p2align 4
+1:
+ /* 8-15 */
+ tbz count, 3, 1f
+ ldr A_l, [src]
+ str A_l, [dstin]
+ ldr A_l, [srcend, -8]
+ str A_l, [dstend, -8]
+ ret
+ .p2align 4
+1:
+ /* 4-7 */
+ tbz count, 2, 1f
+ ldr A_lw, [src]
+ str A_lw, [dstin]
+ ldr A_lw, [srcend, -4]
+ str A_lw, [dstend, -4]
+ ret
+ .p2align 4
+1:
+ /* 2-3 */
+ tbz count, 1, 1f
+ ldrh A_lw, [src]
+ strh A_lw, [dstin]
+ ldrh A_lw, [srcend, -2]
+ strh A_lw, [dstend, -2]
+ ret
+ .p2align 4
+1:
+ /* 0-1 */
+ tbz count, 0, 1f
+ ldrb A_lw, [src]
+ strb A_lw, [dstin]
+1:
+ ret
+
+ /* Align SRC to 16 bytes and copy; that way at least one of the
+ accesses is aligned throughout the copy sequence.
+
+ The count is off by 0 to 15 bytes, but this is OK because the last
+ 64 bytes are always copied from the end (see L(last64) below), so the
+ loop never runs out of bounds. */
+ .p2align 6
+L(copy_long):
+ sub count, count, 64 + 16
+ and tmp1, src, 15
+ bic src, src, 15
+ sub dst, dstin, tmp1
+ add count, count, tmp1
+
+L(loop64):
+ ldp A_l, A_h, [src, 16]!
+ stnp A_l, A_h, [dst, 16]
+ ldp A_l, A_h, [src, 16]!
+ subs count, count, 64
+ stnp A_l, A_h, [dst, 32]
+ ldp A_l, A_h, [src, 16]!
+ stnp A_l, A_h, [dst, 48]
+ ldp A_l, A_h, [src, 16]!
+ stnp A_l, A_h, [dst, 64]
+ add dst, dst, 64
+ b.hi L(loop64)
+
+ /* Write the last full set of 64 bytes. The remainder is at most 64
+ bytes, so it is safe to always copy 64 bytes from the end even if
+ there is just 1 byte left. */
+L(last64):
+ ldp A_l, A_h, [srcend, -64]
+ stnp A_l, A_h, [dstend, -64]
+ ldp A_l, A_h, [srcend, -48]
+ stnp A_l, A_h, [dstend, -48]
+ ldp A_l, A_h, [srcend, -32]
+ stnp A_l, A_h, [dstend, -32]
+ ldp A_l, A_h, [srcend, -16]
+ stnp A_l, A_h, [dstend, -16]
+ ret
+
+END (__memcpy_falkor)
+libc_hidden_builtin_def (__memcpy_falkor)
+#endif
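
Two ideas in the file above are worth spelling out. First, the small and
medium paths never use a byte loop: they copy one block from the start and one
from the end, letting the two overlap for in-between sizes. A hedged C
illustration of the 16-32 byte case (helper name hypothetical, not part of the
patch):

#include <stdint.h>
#include <string.h>

/* Mirrors the ldp/stp pairs in L(copy32) for 16 <= n <= 32: the head
   and tail stores overlap when n < 32, which is harmless because the
   overlapping bytes are written with the same values.  */
static void
copy16_32 (char *dst, const char *src, size_t n)
{
  uint64_t a, b;

  memcpy (&a, src, 8);            /* head: first 16 bytes */
  memcpy (&b, src + 8, 8);
  memcpy (dst, &a, 8);
  memcpy (dst + 8, &b, 8);

  memcpy (&a, src + n - 16, 8);   /* tail: last 16 bytes */
  memcpy (&b, src + n - 8, 8);
  memcpy (dst + n - 16, &a, 8);
  memcpy (dst + n - 8, &b, 8);
}

Second, L(copy_long) pre-subtracts 64 + 16 from count and adds back the source
misalignment, so the unrolled loop always exits with at most 64 bytes
remaining, which L(last64) then copies from the unadjusted end.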
diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
index 0275d11c7f..18f5e60f4c 100644
--- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
+++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
@@ -28,6 +28,7 @@ struct cpu_list
};
static struct cpu_list cpu_list[] = {
+ {"falkor", 0x510FC000},
{"thunderxt88", 0x430F0A10},
{"generic", 0x0}
};
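
The magic number decodes with the architectural MIDR_EL1 layout (implementer
in bits 31:24, variant in 23:20, architecture in 19:16, part number in 15:4,
revision in 3:0). A quick check, assuming nothing beyond standard C:

#include <stdio.h>

int
main (void)
{
  unsigned long midr = 0x510FC000UL;

  printf ("implementer 0x%02lx ('%c')\n",
          (midr >> 24) & 0xff, (int) ((midr >> 24) & 0xff)); /* 0x51 'Q' */
  printf ("variant     0x%lx\n", (midr >> 20) & 0xf);        /* 0x0 */
  printf ("arch        0x%lx\n", (midr >> 16) & 0xf);        /* 0xf */
  printf ("part        0x%03lx\n", (midr >> 4) & 0xfff);     /* 0xc00 Falkor */
  printf ("revision    0x%lx\n", midr & 0xf);                /* 0x0 */
  return 0;
}

Note that the IS_FALKOR macro below deliberately checks only the implementer
and part number, so any Falkor variant or revision matches this table entry.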
diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
index c92b650984..73cb53da9a 100644
--- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
+++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
@@ -41,6 +41,9 @@
#define IS_THUNDERX(midr) (MIDR_IMPLEMENTOR(midr) == 'C' \
&& MIDR_PARTNUM(midr) == 0x0a1)
+#define IS_FALKOR(midr) (MIDR_IMPLEMENTOR(midr) == 'Q' \
+ && MIDR_PARTNUM(midr) == 0xc00)
+
struct cpu_features
{
uint64_t midr_el1;
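
For completeness, midr_el1 has to come from somewhere: MIDR_EL1 is not
directly readable from EL0 unless the kernel traps and emulates the access,
which it advertises via the HWCAP_CPUID auxv bit. A hedged sketch of the usual
read path (AArch64-only; tunable handling and caching omitted):

#include <stdint.h>
#include <sys/auxv.h>

#ifndef HWCAP_CPUID
# define HWCAP_CPUID (1 << 11)   /* from the AArch64 <asm/hwcap.h> */
#endif

static uint64_t
read_midr (void)
{
  uint64_t midr = 0;   /* 0 falls back to the "generic" table entry */

  /* Only issue the mrs when the kernel has promised to emulate it;
     otherwise the instruction would fault at EL0.  */
  if (getauxval (AT_HWCAP) & HWCAP_CPUID)
    asm volatile ("mrs %0, midr_el1" : "=r" (midr));
  return midr;
}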