aboutsummaryrefslogtreecommitdiff
path: root/sysdeps/x86_64
diff options
context:
space:
mode:
authorNoah Goldstein <goldstein.w.n@gmail.com>2022-06-29 18:56:16 -0700
committerNoah Goldstein <goldstein.w.n@gmail.com>2022-06-29 19:47:52 -0700
commit8cfbbbcdf90c3b6a941631c7a57a7f8b09598ac4 (patch)
tree52a472f501a267efb01b2c94f36d048e537d5d02 /sysdeps/x86_64
parent88070acdd0e21a300a381e3d6ba106ca14285a5f (diff)
downloadglibc-8cfbbbcdf90c3b6a941631c7a57a7f8b09598ac4.tar
glibc-8cfbbbcdf90c3b6a941631c7a57a7f8b09598ac4.tar.gz
glibc-8cfbbbcdf90c3b6a941631c7a57a7f8b09598ac4.tar.bz2
glibc-8cfbbbcdf90c3b6a941631c7a57a7f8b09598ac4.zip
x86: Add missing IS_IN (libc) check to memmove-ssse3.S
The IS_IN (libc) check was missing, so for the multiarch build rtld-memmove-ssse3.os was being built and exporting symbols: >$ nm string/rtld-memmove-ssse3.os U __GI___chk_fail 0000000000000020 T __memcpy_chk_ssse3 0000000000000040 T __memcpy_ssse3 0000000000000020 T __memmove_chk_ssse3 0000000000000040 T __memmove_ssse3 0000000000000000 T __mempcpy_chk_ssse3 0000000000000010 T __mempcpy_ssse3 U __x86_shared_cache_size_half Introduced after 2.35 in: commit 26b2478322db94edc9e0e8f577b2f71d291e5acb Author: Noah Goldstein <goldstein.w.n@gmail.com> Date: Thu Apr 14 11:47:40 2022 -0500 x86: Reduce code size of mem{move|pcpy|cpy}-ssse3
Diffstat (limited to 'sysdeps/x86_64')
-rw-r--r--sysdeps/x86_64/multiarch/memmove-ssse3.S60
1 file changed, 44 insertions, 16 deletions
diff --git a/sysdeps/x86_64/multiarch/memmove-ssse3.S b/sysdeps/x86_64/multiarch/memmove-ssse3.S
index 310ff62b86..a88fde4a8f 100644
--- a/sysdeps/x86_64/multiarch/memmove-ssse3.S
+++ b/sysdeps/x86_64/multiarch/memmove-ssse3.S
@@ -1,19 +1,42 @@
-#include <sysdep.h>
-
-#ifndef MEMMOVE
-# define MEMMOVE __memmove_ssse3
-# define MEMMOVE_CHK __memmove_chk_ssse3
-# define MEMCPY __memcpy_ssse3
-# define MEMCPY_CHK __memcpy_chk_ssse3
-# define MEMPCPY __mempcpy_ssse3
-# define MEMPCPY_CHK __mempcpy_chk_ssse3
-#endif
+/* memmove/memcpy/mempcpy optimized for aligned access with SSSE3.
+ All versions must be listed in ifunc-impl-list.c.
+ Copyright (C) 2022 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+
+#if IS_IN (libc)
+
+# include <sysdep.h>
+# ifndef MEMMOVE
+# define MEMMOVE __memmove_ssse3
+# define MEMMOVE_CHK __memmove_chk_ssse3
+# define MEMCPY __memcpy_ssse3
+# define MEMCPY_CHK __memcpy_chk_ssse3
+# define MEMPCPY __mempcpy_ssse3
+# define MEMPCPY_CHK __mempcpy_chk_ssse3
+# endif
.section .text.ssse3, "ax", @progbits
+# if defined SHARED
ENTRY(MEMPCPY_CHK)
cmp %RDX_LP, %RCX_LP
jb HIDDEN_JUMPTARGET(__chk_fail)
END(MEMPCPY_CHK)
+# endif
ENTRY(MEMPCPY)
mov %RDI_LP, %RAX_LP
@@ -21,10 +44,12 @@ ENTRY(MEMPCPY)
jmp L(start)
END(MEMPCPY)
+# if defined SHARED
ENTRY(MEMMOVE_CHK)
cmp %RDX_LP, %RCX_LP
jb HIDDEN_JUMPTARGET(__chk_fail)
END(MEMMOVE_CHK)
+# endif
ENTRY_P2ALIGN(MEMMOVE, 6)
# ifdef __ILP32__
@@ -124,11 +149,11 @@ L(more_2x_vec):
loop. */
movups %xmm0, (%rdi)
-#ifdef SHARED_CACHE_SIZE_HALF
+# ifdef SHARED_CACHE_SIZE_HALF
cmp $SHARED_CACHE_SIZE_HALF, %RDX_LP
-#else
+# else
cmp __x86_shared_cache_size_half(%rip), %rdx
-#endif
+# endif
ja L(large_memcpy)
leaq -64(%rdi, %rdx), %r8
@@ -206,7 +231,7 @@ L(end_loop_fwd):
/* Extactly 64 bytes if `jmp L(end_loop_fwd)` is long encoding.
60 bytes otherwise. */
-#define ALIGNED_LOOP_FWD(align_by); \
+# define ALIGNED_LOOP_FWD(align_by); \
.p2align 6; \
L(loop_fwd_ ## align_by): \
movaps 16(%rsi), %xmm0; \
@@ -275,7 +300,7 @@ L(end_large_loop_fwd):
/* Size > 64 bytes and <= 96 bytes. 32-byte align between ensure
96-byte spacing between each. */
-#define ALIGNED_LARGE_LOOP_FWD(align_by); \
+# define ALIGNED_LARGE_LOOP_FWD(align_by); \
.p2align 5; \
L(large_loop_fwd_ ## align_by): \
movaps 16(%rsi), %xmm0; \
@@ -343,7 +368,7 @@ L(end_loop_bkwd):
/* Extactly 64 bytes if `jmp L(end_loop_bkwd)` is long encoding.
60 bytes otherwise. */
-#define ALIGNED_LOOP_BKWD(align_by); \
+# define ALIGNED_LOOP_BKWD(align_by); \
.p2align 6; \
L(loop_bkwd_ ## align_by): \
movaps 32(%rsi), %xmm1; \
@@ -381,4 +406,7 @@ L(loop_bkwd_ ## align_by): \
END(MEMMOVE)
strong_alias (MEMMOVE, MEMCPY)
+# if defined SHARED
strong_alias (MEMMOVE_CHK, MEMCPY_CHK)
+# endif
+#endif