author     H.J. Lu <hjl.tools@gmail.com>  2020-05-09 12:04:23 -0700
committer  H.J. Lu <hjl.tools@gmail.com>  2020-05-09 12:28:15 -0700
commit     55c7bcc71b84123d5d4bd2814366a6b05fcf8ebd
tree       cf6bd713ccee86b9ce501a94a6709d782d7b0d72
parent     e1b871e25f3add3b63d465694b6731d95cafb299
x86-64: Use RDX_LP on __x86_shared_non_temporal_threshold [BZ #25966]
Since __x86_shared_non_temporal_threshold is defined as

  long int __x86_shared_non_temporal_threshold;

and long int is 4 bytes for x32, use RDX_LP to compare against
__x86_shared_non_temporal_threshold in assembly code.
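For context, RDX_LP is one of the *_LP register macros from the x86_64 sysdep
headers: it names the full 64-bit register when long int and pointers are
8 bytes, and the 32-bit half on x32.  A minimal sketch of the idea follows;
it is illustrative only and not a quote of the glibc headers (the real
definitions live under sysdeps/x86_64/ and sysdeps/x86_64/x32/):

  /* Sketch: pick the register width that matches sizeof (long int)
     for the current ABI, so %RDX_LP expands to %edx or %rdx.  */
  #ifdef __ILP32__       /* x32: long int and pointers are 4 bytes.  */
  # define RDX_LP edx
  #else                  /* x86-64: long int and pointers are 8 bytes.  */
  # define RDX_LP rdx
  #endif

With this, "cmp __x86_shared_non_temporal_threshold(%rip), %RDX_LP" assembles
to a 4-byte compare on x32 and an 8-byte compare on x86-64, so the memory
operand never reads past the 4-byte long int on x32, whereas cmpq always
loads 8 bytes.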
Diffstat (limited to 'sysdeps/x86_64')
-rw-r--r--  sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index c763b7d871..74953245aa 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -244,7 +244,7 @@ L(return):
ret
L(movsb):
- cmpq __x86_shared_non_temporal_threshold(%rip), %rdx
+ cmp __x86_shared_non_temporal_threshold(%rip), %RDX_LP
jae L(more_8x_vec)
cmpq %rsi, %rdi
jb 1f
@@ -402,7 +402,7 @@ L(more_8x_vec):
addq %r8, %rdx
#if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc)
/* Check non-temporal store threshold. */
- cmpq __x86_shared_non_temporal_threshold(%rip), %rdx
+ cmp __x86_shared_non_temporal_threshold(%rip), %RDX_LP
ja L(large_forward)
#endif
L(loop_4x_vec_forward):
@@ -454,7 +454,7 @@ L(more_8x_vec_backward):
subq %r8, %rdx
#if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc)
/* Check non-temporal store threshold. */
- cmpq __x86_shared_non_temporal_threshold(%rip), %rdx
+ cmp __x86_shared_non_temporal_threshold(%rip), %RDX_LP
ja L(large_backward)
#endif
L(loop_4x_vec_backward):