Diffstat (limited to 'sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S')
-rw-r--r--  sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S | 33 +++++++++++++++------------------
1 file changed, 15 insertions(+), 18 deletions(-)
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index 50fffeb5ce..386624b3c4 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -158,11 +158,12 @@ L(last_2x_vec):
VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(1)
VMOVU %VEC(0), (%rdi)
VMOVU %VEC(1), -VEC_SIZE(%rdi,%rdx)
- VZEROUPPER
#if !defined USE_MULTIARCH || !IS_IN (libc)
L(nop):
-#endif
ret
+#else
+ VZEROUPPER_RETURN
+#endif
#if defined USE_MULTIARCH && IS_IN (libc)
END (MEMMOVE_SYMBOL (__memmove, unaligned))
@@ -255,8 +256,11 @@ L(last_2x_vec):
VMOVU %VEC(0), (%rdi)
VMOVU %VEC(1), -VEC_SIZE(%rdi,%rdx)
L(return):
- VZEROUPPER
+#if VEC_SIZE > 16
+ ZERO_UPPER_VEC_REGISTERS_RETURN
+#else
ret
+#endif
L(movsb):
cmpq __x86_shared_non_temporal_threshold(%rip), %rdx
@@ -324,8 +328,7 @@ L(between_32_63):
VMOVU -32(%rsi,%rdx), %YMM1
VMOVU %YMM0, (%rdi)
VMOVU %YMM1, -32(%rdi,%rdx)
- VZEROUPPER
- ret
+ VZEROUPPER_RETURN
#endif
#if VEC_SIZE > 16
/* From 16 to 31. No branch when size == 16. */
@@ -334,7 +337,7 @@ L(between_16_31):
VMOVU -16(%rsi,%rdx), %XMM1
VMOVU %XMM0, (%rdi)
VMOVU %XMM1, -16(%rdi,%rdx)
- ret
+ VZEROUPPER_RETURN
#endif
L(between_8_15):
/* From 8 to 15. No branch when size == 8. */
@@ -387,8 +390,7 @@ L(more_2x_vec):
VMOVU %VEC(5), -(VEC_SIZE * 2)(%rdi,%rdx)
VMOVU %VEC(6), -(VEC_SIZE * 3)(%rdi,%rdx)
VMOVU %VEC(7), -(VEC_SIZE * 4)(%rdi,%rdx)
- VZEROUPPER
- ret
+ VZEROUPPER_RETURN
L(last_4x_vec):
/* Copy from 2 * VEC to 4 * VEC. */
VMOVU (%rsi), %VEC(0)
@@ -399,8 +401,7 @@ L(last_4x_vec):
VMOVU %VEC(1), VEC_SIZE(%rdi)
VMOVU %VEC(2), -VEC_SIZE(%rdi,%rdx)
VMOVU %VEC(3), -(VEC_SIZE * 2)(%rdi,%rdx)
- VZEROUPPER
- ret
+ VZEROUPPER_RETURN
L(more_8x_vec):
cmpq %rsi, %rdi
@@ -456,8 +457,7 @@ L(loop_4x_vec_forward):
VMOVU %VEC(8), -(VEC_SIZE * 3)(%rcx)
/* Store the first VEC. */
VMOVU %VEC(4), (%r11)
- VZEROUPPER
- ret
+ VZEROUPPER_RETURN
L(more_8x_vec_backward):
/* Load the first 4 * VEC and last VEC to support overlapping
@@ -508,8 +508,7 @@ L(loop_4x_vec_backward):
VMOVU %VEC(7), (VEC_SIZE * 3)(%rdi)
/* Store the last VEC. */
VMOVU %VEC(8), (%r11)
- VZEROUPPER
- ret
+ VZEROUPPER_RETURN
#if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc)
L(large_forward):
@@ -544,8 +543,7 @@ L(loop_large_forward):
VMOVU %VEC(8), -(VEC_SIZE * 3)(%rcx)
/* Store the first VEC. */
VMOVU %VEC(4), (%r11)
- VZEROUPPER
- ret
+ VZEROUPPER_RETURN
L(large_backward):
/* Don't use non-temporal store if there is overlap between
@@ -579,8 +577,7 @@ L(loop_large_backward):
VMOVU %VEC(7), (VEC_SIZE * 3)(%rdi)
/* Store the last VEC. */
VMOVU %VEC(8), (%r11)
- VZEROUPPER
- ret
+ VZEROUPPER_RETURN
#endif
END (MEMMOVE_SYMBOL (__memmove, unaligned_erms))
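
The VZEROUPPER_RETURN and ZERO_UPPER_VEC_REGISTERS_RETURN macros that this patch substitutes for the open-coded "VZEROUPPER; ret" sequences are defined outside this file (in sysdeps/x86_64/sysdep.h and in the per-variant wrapper headers). The sketch below is only an illustration of what such definitions can look like, assuming GAS syntax with the C preprocessor; the exact glibc definitions, the RTM-specific overrides, and the VEC_SIZE-dependent expansion of VZEROUPPER may differ. The point of routing every return through one macro is that RTM-safe builds can swap in an xtest/vzeroall sequence, since executing VZEROUPPER inside an RTM transaction aborts it.

/* Illustrative sketch only -- not the verbatim glibc definitions.
   VZEROUPPER itself is typically VEC_SIZE-dependent: vzeroupper for
   the AVX/AVX2 variants, nothing for the SSE2 one.  */

/* Plain variant: zero the upper halves of the vector registers (to
   avoid AVX/SSE transition penalties) and return.  */
#ifndef VZEROUPPER_RETURN
# define VZEROUPPER_RETURN	VZEROUPPER; ret
#endif

/* RTM-safe variant: vzeroupper aborts a transaction, so when xtest
   reports transactional execution (ZF = 0), use vzeroall instead.  */
#define ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST \
	xtest;				\
	jnz	1f;			\
	vzeroupper;			\
	ret;				\
1:					\
	vzeroall;			\
	ret

/* Default: behave like VZEROUPPER_RETURN unless a build overrides
   this with the xtest-based sequence above.  */
#ifndef ZERO_UPPER_VEC_REGISTERS_RETURN
# define ZERO_UPPER_VEC_REGISTERS_RETURN \
	VZEROUPPER; ret
#endif

As the hunk at L(return) shows, ZERO_UPPER_VEC_REGISTERS_RETURN is only emitted when VEC_SIZE > 16; the 16-byte (SSE2) flavour keeps a plain ret, since it never dirties the upper YMM halves.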