author    H.J. Lu <hjl.tools@gmail.com>    2016-03-16 14:24:01 -0700
committer H.J. Lu <hjl.tools@gmail.com>    2016-03-16 14:24:19 -0700
commit    86ed888255bcafa7cd3c4eb248815b1ba4eb3699 (patch)
tree      996b1218916e2f27ca69e4c72d69e511118955a7 /sysdeps
parent    0863cf2ada0a3944d0381e93d1c35ba6e1d43b53 (diff)
Use JUMPTARGET in x86-64 mathvec
When PLT may be used, JUMPTARGET should be used instead of calling the function directly.

* sysdeps/x86_64/fpu/multiarch/svml_d_cos2_core_sse4.S (_ZGVbN2v_cos_sse4): Use JUMPTARGET to call cos.
* sysdeps/x86_64/fpu/multiarch/svml_d_cos4_core_avx2.S (_ZGVdN4v_cos_avx2): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S (_ZGVdN4v_cos): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core_sse4.S (_ZGVbN2v_exp_sse4): Use JUMPTARGET to call exp.
* sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core_avx2.S (_ZGVdN4v_exp_avx2): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S (_ZGVdN4v_exp): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_d_log2_core_sse4.S (_ZGVbN2v_log_sse4): Use JUMPTARGET to call log.
* sysdeps/x86_64/fpu/multiarch/svml_d_log4_core_avx2.S (_ZGVdN4v_log_avx2): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S (_ZGVdN4v_log): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_d_pow2_core_sse4.S (_ZGVbN2vv_pow_sse4): Use JUMPTARGET to call pow.
* sysdeps/x86_64/fpu/multiarch/svml_d_pow4_core_avx2.S (_ZGVdN4vv_pow_avx2): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S (_ZGVdN4vv_pow): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_d_sin2_core_sse4.S (_ZGVbN2v_sin_sse4): Use JUMPTARGET to call sin.
* sysdeps/x86_64/fpu/multiarch/svml_d_sin4_core_avx2.S (_ZGVdN4v_sin_avx2): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_d_sin8_core_avx512.S (_ZGVdN4v_sin): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_d_sincos2_core_sse4.S (_ZGVbN2vvv_sincos_sse4): Use JUMPTARGET to call sin and cos.
* sysdeps/x86_64/fpu/multiarch/svml_d_sincos4_core_avx2.S (_ZGVdN4vvv_sincos_avx2): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S (_ZGVdN4vvv_sincos): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_s_cosf16_core_avx512.S (_ZGVdN8v_cosf): Use JUMPTARGET to call cosf.
* sysdeps/x86_64/fpu/multiarch/svml_s_cosf4_core_sse4.S (_ZGVbN4v_cosf_sse4): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_s_cosf8_core_avx2.S (_ZGVdN8v_cosf_avx2): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S (_ZGVdN8v_expf): Use JUMPTARGET to call expf.
* sysdeps/x86_64/fpu/multiarch/svml_s_expf4_core_sse4.S (_ZGVbN4v_expf_sse4): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_s_expf8_core_avx2.S (_ZGVdN8v_expf_avx2): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_s_logf16_core_avx512.S (_ZGVdN8v_logf): Use JUMPTARGET to call logf.
* sysdeps/x86_64/fpu/multiarch/svml_s_logf4_core_sse4.S (_ZGVbN4v_logf_sse4): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_s_logf8_core_avx2.S (_ZGVdN8v_logf_avx2): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S (_ZGVdN8vv_powf): Use JUMPTARGET to call powf.
* sysdeps/x86_64/fpu/multiarch/svml_s_powf4_core_sse4.S (_ZGVbN4vv_powf_sse4): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_s_powf8_core_avx2.S (_ZGVdN8vv_powf_avx2): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S (_ZGVdN8vvv_sincosf): Use JUMPTARGET to call sinf and cosf.
* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S (_ZGVbN4vvv_sincosf_sse4): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S (_ZGVdN8vvv_sincosf_avx2): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S (_ZGVdN8v_sinf): Use JUMPTARGET to call sinf.
* sysdeps/x86_64/fpu/multiarch/svml_s_sinf4_core_sse4.S (_ZGVbN4v_sinf_sse4): Likewise.
* sysdeps/x86_64/fpu/multiarch/svml_s_sinf8_core_avx2.S (_ZGVdN8v_sinf_avx2): Likewise.
* sysdeps/x86_64/fpu/svml_d_wrapper_impl.h (WRAPPER_IMPL_SSE2): Use JUMPTARGET to call callee.
(WRAPPER_IMPL_SSE2_ff): Likewise.
(WRAPPER_IMPL_SSE2_fFF): Likewise.
(WRAPPER_IMPL_AVX): Likewise.
(WRAPPER_IMPL_AVX_ff): Likewise.
(WRAPPER_IMPL_AVX_fFF): Likewise.
(WRAPPER_IMPL_AVX512): Likewise.
(WRAPPER_IMPL_AVX512_ff): Likewise.
* sysdeps/x86_64/fpu/svml_s_wrapper_impl.h (WRAPPER_IMPL_SSE2): Likewise.
(WRAPPER_IMPL_SSE2_ff): Likewise.
(WRAPPER_IMPL_SSE2_fFF): Likewise.
(WRAPPER_IMPL_AVX): Likewise.
(WRAPPER_IMPL_AVX_ff): Likewise.
(WRAPPER_IMPL_AVX_fFF): Likewise.
(WRAPPER_IMPL_AVX512): Likewise.
(WRAPPER_IMPL_AVX512_ff): Likewise.
(WRAPPER_IMPL_AVX512_fFF): Likewise.
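For context, every hunk below makes the same substitution: an explicit "call func@PLT" becomes "call JUMPTARGET(func)". The sketch that follows is illustrative only; the definition of JUMPTARGET is assumed from its conventional form in sysdeps/x86_64/sysdep.h and is not part of this patch.

/* Assumed, simplified definition -- not quoted from this commit.  */
#ifdef PIC
# define JUMPTARGET(name)	name##@PLT	/* PIC/shared code: reference via the PLT */
#else
# define JUMPTARGET(name)	name		/* non-PIC code: call the symbol directly */
#endif

	/* Effect on a typical scalar fallback call site from the hunks below.  */
	movsd	200(%rsp,%r15), %xmm0
	call	JUMPTARGET(cos)		/* expands to cos@PLT under PIC, plain cos otherwise */
	movsd	%xmm0, 264(%rsp,%r15)

Under that assumed definition, PIC builds keep emitting the same func@PLT reference as before, while non-PIC builds drop the hard-coded @PLT suffix; the macro centralizes the decision so individual call sites no longer spell out how the callee is reached.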
Diffstat (limited to 'sysdeps')
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_cos2_core_sse4.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_cos4_core_avx2.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S | 8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core_sse4.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core_avx2.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S | 8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_log2_core_sse4.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_log4_core_avx2.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S | 8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_pow2_core_sse4.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_pow4_core_avx2.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S | 8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sin2_core_sse4.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sin4_core_avx2.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sin8_core_avx512.S | 8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sincos2_core_sse4.S | 8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sincos4_core_avx2.S | 8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S | 16
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_cosf16_core_avx512.S | 8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_cosf4_core_sse4.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_cosf8_core_avx2.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S | 8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_expf4_core_sse4.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_expf8_core_avx2.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_logf16_core_avx512.S | 8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_logf4_core_sse4.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_logf8_core_avx2.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S | 8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_powf4_core_sse4.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_powf8_core_avx2.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S | 16
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S | 8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S | 8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S | 8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sinf4_core_sse4.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sinf8_core_avx2.S | 4
-rw-r--r--  sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 12
-rw-r--r--  sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 24
38 files changed, 130 insertions, 130 deletions
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cos2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cos2_core_sse4.S
index 088fcae067..4d2ebf7aaf 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_cos2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cos2_core_sse4.S
@@ -205,7 +205,7 @@ ENTRY (_ZGVbN2v_cos_sse4)
shlq $4, %r15
movsd 200(%rsp,%r15), %xmm0
- call cos@PLT
+ call JUMPTARGET(cos)
movsd %xmm0, 264(%rsp,%r15)
jmp .LBL_1_8
@@ -215,7 +215,7 @@ ENTRY (_ZGVbN2v_cos_sse4)
shlq $4, %r15
movsd 192(%rsp,%r15), %xmm0
- call cos@PLT
+ call JUMPTARGET(cos)
movsd %xmm0, 256(%rsp,%r15)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cos4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cos4_core_avx2.S
index 4e653216d9..54f7e5e89e 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_cos4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cos4_core_avx2.S
@@ -188,7 +188,7 @@ ENTRY (_ZGVdN4v_cos_avx2)
vmovsd 328(%rsp,%r15), %xmm0
vzeroupper
- call cos@PLT
+ call JUMPTARGET(cos)
vmovsd %xmm0, 392(%rsp,%r15)
jmp .LBL_1_8
@@ -199,7 +199,7 @@ ENTRY (_ZGVdN4v_cos_avx2)
vmovsd 320(%rsp,%r15), %xmm0
vzeroupper
- call cos@PLT
+ call JUMPTARGET(cos)
vmovsd %xmm0, 384(%rsp,%r15)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S
index 1cac1d827a..874bd80d23 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S
@@ -221,7 +221,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_cos
movzbl %r12b, %r15d
shlq $4, %r15
vmovsd 1160(%rsp,%r15), %xmm0
- call cos@PLT
+ call JUMPTARGET(cos)
vmovsd %xmm0, 1224(%rsp,%r15)
jmp .LBL_1_8
@@ -229,7 +229,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_cos
movzbl %r12b, %r15d
shlq $4, %r15
vmovsd 1152(%rsp,%r15), %xmm0
- call cos@PLT
+ call JUMPTARGET(cos)
vmovsd %xmm0, 1216(%rsp,%r15)
jmp .LBL_1_7
#endif
@@ -438,7 +438,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_cos
vzeroupper
vmovsd 1160(%rsp,%r15), %xmm0
- call cos@PLT
+ call JUMPTARGET(cos)
vmovsd %xmm0, 1224(%rsp,%r15)
jmp .LBL_2_8
@@ -450,7 +450,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_cos
vzeroupper
vmovsd 1152(%rsp,%r15), %xmm0
- call cos@PLT
+ call JUMPTARGET(cos)
vmovsd %xmm0, 1216(%rsp,%r15)
jmp .LBL_2_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core_sse4.S
index 445b230152..9a779593cd 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core_sse4.S
@@ -207,7 +207,7 @@ ENTRY (_ZGVbN2v_exp_sse4)
shlq $4, %r15
movsd 200(%rsp,%r15), %xmm0
- call exp@PLT
+ call JUMPTARGET(exp)
movsd %xmm0, 264(%rsp,%r15)
jmp .LBL_1_8
@@ -217,7 +217,7 @@ ENTRY (_ZGVbN2v_exp_sse4)
shlq $4, %r15
movsd 192(%rsp,%r15), %xmm0
- call exp@PLT
+ call JUMPTARGET(exp)
movsd %xmm0, 256(%rsp,%r15)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core_avx2.S
index 25f9e28941..2a35fe3846 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core_avx2.S
@@ -193,7 +193,7 @@ ENTRY (_ZGVdN4v_exp_avx2)
vmovsd 328(%rsp,%r15), %xmm0
vzeroupper
- call exp@PLT
+ call JUMPTARGET(exp)
vmovsd %xmm0, 392(%rsp,%r15)
jmp .LBL_1_8
@@ -204,7 +204,7 @@ ENTRY (_ZGVdN4v_exp_avx2)
vmovsd 320(%rsp,%r15), %xmm0
vzeroupper
- call exp@PLT
+ call JUMPTARGET(exp)
vmovsd %xmm0, 384(%rsp,%r15)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S
index 74f1d2ce7b..456792dec6 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S
@@ -223,7 +223,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
movzbl %r12b, %r15d
shlq $4, %r15
vmovsd 1160(%rsp,%r15), %xmm0
- call exp@PLT
+ call JUMPTARGET(exp)
vmovsd %xmm0, 1224(%rsp,%r15)
jmp .LBL_1_8
@@ -231,7 +231,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
movzbl %r12b, %r15d
shlq $4, %r15
vmovsd 1152(%rsp,%r15), %xmm0
- call exp@PLT
+ call JUMPTARGET(exp)
vmovsd %xmm0, 1216(%rsp,%r15)
jmp .LBL_1_7
#endif
@@ -438,7 +438,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
vmovsd 1160(%rsp,%r15), %xmm0
vzeroupper
vmovsd 1160(%rsp,%r15), %xmm0
- call exp@PLT
+ call JUMPTARGET(exp)
vmovsd %xmm0, 1224(%rsp,%r15)
jmp .LBL_2_8
@@ -448,7 +448,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
vmovsd 1152(%rsp,%r15), %xmm0
vzeroupper
vmovsd 1152(%rsp,%r15), %xmm0
- call exp@PLT
+ call JUMPTARGET(exp)
vmovsd %xmm0, 1216(%rsp,%r15)
jmp .LBL_2_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log2_core_sse4.S
index 5d254288f6..67959729d7 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_log2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log2_core_sse4.S
@@ -211,7 +211,7 @@ ENTRY (_ZGVbN2v_log_sse4)
shlq $4, %r15
movsd 200(%rsp,%r15), %xmm0
- call log@PLT
+ call JUMPTARGET(log)
movsd %xmm0, 264(%rsp,%r15)
jmp .LBL_1_8
@@ -221,7 +221,7 @@ ENTRY (_ZGVbN2v_log_sse4)
shlq $4, %r15
movsd 192(%rsp,%r15), %xmm0
- call log@PLT
+ call JUMPTARGET(log)
movsd %xmm0, 256(%rsp,%r15)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log4_core_avx2.S
index 5da298747d..267dae0a1f 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_log4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log4_core_avx2.S
@@ -191,7 +191,7 @@ ENTRY (_ZGVdN4v_log_avx2)
vmovsd 328(%rsp,%r15), %xmm0
vzeroupper
- call log@PLT
+ call JUMPTARGET(log)
vmovsd %xmm0, 392(%rsp,%r15)
jmp .LBL_1_8
@@ -202,7 +202,7 @@ ENTRY (_ZGVdN4v_log_avx2)
vmovsd 320(%rsp,%r15), %xmm0
vzeroupper
- call log@PLT
+ call JUMPTARGET(log)
vmovsd %xmm0, 384(%rsp,%r15)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S
index dca8e61f34..4c52a91605 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S
@@ -222,7 +222,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_log
movzbl %r12b, %r15d
shlq $4, %r15
vmovsd 1160(%rsp,%r15), %xmm0
- call log@PLT
+ call JUMPTARGET(log)
vmovsd %xmm0, 1224(%rsp,%r15)
jmp .LBL_1_8
@@ -230,7 +230,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_log
movzbl %r12b, %r15d
shlq $4, %r15
vmovsd 1152(%rsp,%r15), %xmm0
- call log@PLT
+ call JUMPTARGET(log)
vmovsd %xmm0, 1216(%rsp,%r15)
jmp .LBL_1_7
#endif
@@ -443,7 +443,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_log
vzeroupper
vmovsd 1160(%rsp,%r15), %xmm0
- call log@PLT
+ call JUMPTARGET(log)
vmovsd %xmm0, 1224(%rsp,%r15)
jmp .LBL_2_8
@@ -455,7 +455,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_log
vzeroupper
vmovsd 1152(%rsp,%r15), %xmm0
- call log@PLT
+ call JUMPTARGET(log)
vmovsd %xmm0, 1216(%rsp,%r15)
jmp .LBL_2_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_pow2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_pow2_core_sse4.S
index 064d170878..699f74ed44 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_pow2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_pow2_core_sse4.S
@@ -413,7 +413,7 @@ ENTRY (_ZGVbN2vv_pow_sse4)
movsd 72(%rsp,%r15), %xmm0
movsd 136(%rsp,%r15), %xmm1
- call pow@PLT
+ call JUMPTARGET(pow)
movsd %xmm0, 200(%rsp,%r15)
jmp .LBL_1_8
@@ -424,7 +424,7 @@ ENTRY (_ZGVbN2vv_pow_sse4)
movsd 64(%rsp,%r15), %xmm0
movsd 128(%rsp,%r15), %xmm1
- call pow@PLT
+ call JUMPTARGET(pow)
movsd %xmm0, 192(%rsp,%r15)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_pow4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_pow4_core_avx2.S
index f2a73ffe1e..35ba076caa 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_pow4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_pow4_core_avx2.S
@@ -367,7 +367,7 @@ ENTRY (_ZGVdN4vv_pow_avx2)
vmovsd 264(%rsp,%r15), %xmm1
vzeroupper
- call pow@PLT
+ call JUMPTARGET(pow)
vmovsd %xmm0, 328(%rsp,%r15)
jmp .LBL_1_8
@@ -379,7 +379,7 @@ ENTRY (_ZGVdN4vv_pow_avx2)
vmovsd 256(%rsp,%r15), %xmm1
vzeroupper
- call pow@PLT
+ call JUMPTARGET(pow)
vmovsd %xmm0, 320(%rsp,%r15)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S
index 4a515233fc..fd6a88961e 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S
@@ -392,7 +392,7 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN4vv_pow
shlq $4, %r15
vmovsd 1160(%rsp,%r15), %xmm0
vmovsd 1224(%rsp,%r15), %xmm1
- call pow@PLT
+ call JUMPTARGET(pow)
vmovsd %xmm0, 1288(%rsp,%r15)
jmp .LBL_1_8
@@ -401,7 +401,7 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN4vv_pow
shlq $4, %r15
vmovsd 1152(%rsp,%r15), %xmm0
vmovsd 1216(%rsp,%r15), %xmm1
- call pow@PLT
+ call JUMPTARGET(pow)
vmovsd %xmm0, 1280(%rsp,%r15)
jmp .LBL_1_7
@@ -720,7 +720,7 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN4vv_pow
vzeroupper
vmovsd 1160(%rsp,%r15), %xmm0
- call pow@PLT
+ call JUMPTARGET(pow)
vmovsd %xmm0, 1288(%rsp,%r15)
jmp .LBL_2_8
@@ -732,7 +732,7 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN4vv_pow
vzeroupper
vmovsd 1152(%rsp,%r15), %xmm0
- call pow@PLT
+ call JUMPTARGET(pow)
vmovsd %xmm0, 1280(%rsp,%r15)
jmp .LBL_2_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sin2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sin2_core_sse4.S
index 5755ce6f74..ccab3cc21e 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_sin2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sin2_core_sse4.S
@@ -211,7 +211,7 @@ ENTRY (_ZGVbN2v_sin_sse4)
shlq $4, %r15
movsd 200(%rsp,%r15), %xmm0
- call sin@PLT
+ call JUMPTARGET(sin)
movsd %xmm0, 264(%rsp,%r15)
jmp .LBL_1_8
@@ -221,7 +221,7 @@ ENTRY (_ZGVbN2v_sin_sse4)
shlq $4, %r15
movsd 192(%rsp,%r15), %xmm0
- call sin@PLT
+ call JUMPTARGET(sin)
movsd %xmm0, 256(%rsp,%r15)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sin4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sin4_core_avx2.S
index 46b557158a..a32fbc3a43 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_sin4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sin4_core_avx2.S
@@ -191,7 +191,7 @@ ENTRY (_ZGVdN4v_sin_avx2)
vmovsd 328(%rsp,%r15), %xmm0
vzeroupper
- call sin@PLT
+ call JUMPTARGET(sin)
vmovsd %xmm0, 392(%rsp,%r15)
jmp .LBL_1_8
@@ -202,7 +202,7 @@ ENTRY (_ZGVdN4v_sin_avx2)
vmovsd 320(%rsp,%r15), %xmm0
vzeroupper
- call sin@PLT
+ call JUMPTARGET(sin)
vmovsd %xmm0, 384(%rsp,%r15)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sin8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sin8_core_avx512.S
index 6c565f3861..d3449e3d29 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_sin8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sin8_core_avx512.S
@@ -222,7 +222,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_sin
movzbl %r12b, %r15d
shlq $4, %r15
vmovsd 1160(%rsp,%r15), %xmm0
- call sin@PLT
+ call JUMPTARGET(sin)
vmovsd %xmm0, 1224(%rsp,%r15)
jmp .LBL_1_8
@@ -230,7 +230,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_sin
movzbl %r12b, %r15d
shlq $4, %r15
vmovsd 1152(%rsp,%r15), %xmm0
- call sin@PLT
+ call JUMPTARGET(sin)
vmovsd %xmm0, 1216(%rsp,%r15)
jmp .LBL_1_7
#endif
@@ -440,7 +440,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_sin
vzeroupper
vmovsd 1160(%rsp,%r15), %xmm0
- call sin@PLT
+ call JUMPTARGET(sin)
vmovsd %xmm0, 1224(%rsp,%r15)
jmp .LBL_2_8
@@ -452,7 +452,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_sin
vzeroupper
vmovsd 1152(%rsp,%r15), %xmm0
- call sin@PLT
+ call JUMPTARGET(sin)
vmovsd %xmm0, 1216(%rsp,%r15)
jmp .LBL_2_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sincos2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sincos2_core_sse4.S
index 65ad540122..d37275d7ab 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_sincos2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sincos2_core_sse4.S
@@ -287,12 +287,12 @@ ENTRY (_ZGVbN2vvv_sincos_sse4)
shlq $4, %r15
movsd 136(%rsp,%r15), %xmm0
- call sin@PLT
+ call JUMPTARGET(sin)
movsd %xmm0, 200(%rsp,%r15)
movsd 136(%rsp,%r15), %xmm0
- call cos@PLT
+ call JUMPTARGET(cos)
movsd %xmm0, 264(%rsp,%r15)
jmp .LBL_1_8
@@ -302,12 +302,12 @@ ENTRY (_ZGVbN2vvv_sincos_sse4)
shlq $4, %r15
movsd 128(%rsp,%r15), %xmm0
- call sin@PLT
+ call JUMPTARGET(sin)
movsd %xmm0, 192(%rsp,%r15)
movsd 128(%rsp,%r15), %xmm0
- call cos@PLT
+ call JUMPTARGET(cos)
movsd %xmm0, 256(%rsp,%r15)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sincos4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sincos4_core_avx2.S
index 60d03e9f8b..24b57f4e8c 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_sincos4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sincos4_core_avx2.S
@@ -248,12 +248,12 @@ ENTRY (_ZGVdN4vvv_sincos_avx2)
vmovsd 264(%rsp,%r15), %xmm0
vzeroupper
- call sin@PLT
+ call JUMPTARGET(sin)
vmovsd %xmm0, 328(%rsp,%r15)
vmovsd 264(%rsp,%r15), %xmm0
- call cos@PLT
+ call JUMPTARGET(cos)
vmovsd %xmm0, 392(%rsp,%r15)
jmp .LBL_1_8
@@ -264,12 +264,12 @@ ENTRY (_ZGVdN4vvv_sincos_avx2)
vmovsd 256(%rsp,%r15), %xmm0
vzeroupper
- call sin@PLT
+ call JUMPTARGET(sin)
vmovsd %xmm0, 320(%rsp,%r15)
vmovsd 256(%rsp,%r15), %xmm0
- call cos@PLT
+ call JUMPTARGET(cos)
vmovsd %xmm0, 384(%rsp,%r15)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S
index 44700f90b8..1d9f426d37 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S
@@ -278,12 +278,12 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN4vvv_sincos
shlq $4, %r15
vmovsd 1160(%rsp,%r15), %xmm0
- call sin@PLT
+ call JUMPTARGET(sin)
vmovsd %xmm0, 1224(%rsp,%r15)
vmovsd 1160(%rsp,%r15), %xmm0
- call cos@PLT
+ call JUMPTARGET(cos)
vmovsd %xmm0, 1288(%rsp,%r15)
jmp .LBL_1_8
@@ -293,12 +293,12 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN4vvv_sincos
shlq $4, %r15
vmovsd 1152(%rsp,%r15), %xmm0
- call sin@PLT
+ call JUMPTARGET(sin)
vmovsd %xmm0, 1216(%rsp,%r15)
vmovsd 1152(%rsp,%r15), %xmm0
- call cos@PLT
+ call JUMPTARGET(cos)
vmovsd %xmm0, 1280(%rsp,%r15)
jmp .LBL_1_7
@@ -557,12 +557,12 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN4vvv_sincos
vzeroupper
vmovsd 1160(%rsp,%r15), %xmm0
- call sin@PLT
+ call JUMPTARGET(sin)
vmovsd %xmm0, 1224(%rsp,%r15)
vmovsd 1160(%rsp,%r15), %xmm0
- call cos@PLT
+ call JUMPTARGET(cos)
vmovsd %xmm0, 1288(%rsp,%r15)
jmp .LBL_2_8
@@ -574,12 +574,12 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN4vvv_sincos
vzeroupper
vmovsd 1152(%rsp,%r15), %xmm0
- call sin@PLT
+ call JUMPTARGET(sin)
vmovsd %xmm0, 1216(%rsp,%r15)
vmovsd 1152(%rsp,%r15), %xmm0
- call cos@PLT
+ call JUMPTARGET(cos)
vmovsd %xmm0, 1280(%rsp,%r15)
jmp .LBL_2_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_cosf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_cosf16_core_avx512.S
index 5004cd4758..b39ec3ad2f 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_cosf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_cosf16_core_avx512.S
@@ -225,14 +225,14 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_cosf
cfi_restore_state
movzbl %r12b, %r15d
vmovss 1156(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 1220(%rsp,%r15,8)
jmp .LBL_1_8
.LBL_1_12:
movzbl %r12b, %r15d
vmovss 1152(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 1216(%rsp,%r15,8)
jmp .LBL_1_7
#endif
@@ -440,7 +440,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_cosf
vmovss 1156(%rsp,%r15,8), %xmm0
vzeroupper
vmovss 1156(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 1220(%rsp,%r15,8)
jmp .LBL_2_8
.LBL_2_12:
@@ -448,7 +448,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_cosf
vmovss 1152(%rsp,%r15,8), %xmm0
vzeroupper
vmovss 1152(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 1216(%rsp,%r15,8)
jmp .LBL_2_7
#endif
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_cosf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_cosf4_core_sse4.S
index d23ff72a30..90857dc1c3 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_cosf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_cosf4_core_sse4.S
@@ -211,7 +211,7 @@ ENTRY (_ZGVbN4v_cosf_sse4)
movzbl %r12b, %r15d
movss 196(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
movss %xmm0, 260(%rsp,%r15,8)
jmp .LBL_1_8
@@ -220,7 +220,7 @@ ENTRY (_ZGVbN4v_cosf_sse4)
movzbl %r12b, %r15d
movss 192(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
movss %xmm0, 256(%rsp,%r15,8)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_cosf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_cosf8_core_avx2.S
index 513f3c0a29..7a793a9815 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_cosf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_cosf8_core_avx2.S
@@ -197,7 +197,7 @@ ENTRY (_ZGVdN8v_cosf_avx2)
vmovss 324(%rsp,%r15,8), %xmm0
vzeroupper
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 388(%rsp,%r15,8)
jmp .LBL_1_8
@@ -207,7 +207,7 @@ ENTRY (_ZGVdN8v_cosf_avx2)
vmovss 320(%rsp,%r15,8), %xmm0
vzeroupper
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 384(%rsp,%r15,8)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S
index 7eb7a1b775..44f61a2d41 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S
@@ -212,14 +212,14 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_expf
cfi_restore_state
movzbl %r12b, %r15d
vmovss 1156(%rsp,%r15,8), %xmm0
- call expf@PLT
+ call JUMPTARGET(expf)
vmovss %xmm0, 1220(%rsp,%r15,8)
jmp .LBL_1_8
.LBL_1_12:
movzbl %r12b, %r15d
vmovss 1152(%rsp,%r15,8), %xmm0
- call expf@PLT
+ call JUMPTARGET(expf)
vmovss %xmm0, 1216(%rsp,%r15,8)
jmp .LBL_1_7
@@ -422,7 +422,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_expf
vzeroupper
vmovss 1156(%rsp,%r15,8), %xmm0
- call expf@PLT
+ call JUMPTARGET(expf)
vmovss %xmm0, 1220(%rsp,%r15,8)
jmp .LBL_2_8
@@ -433,7 +433,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_expf
vzeroupper
vmovss 1152(%rsp,%r15,8), %xmm0
- call expf@PLT
+ call JUMPTARGET(expf)
vmovss %xmm0, 1216(%rsp,%r15,8)
jmp .LBL_2_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expf4_core_sse4.S
index c6f91e8dc1..d3db509ec4 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_expf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expf4_core_sse4.S
@@ -195,7 +195,7 @@ ENTRY (_ZGVbN4v_expf_sse4)
movzbl %r12b, %r15d
movss 196(%rsp,%r15,8), %xmm0
- call expf@PLT
+ call JUMPTARGET(expf)
movss %xmm0, 260(%rsp,%r15,8)
jmp .LBL_1_8
@@ -204,7 +204,7 @@ ENTRY (_ZGVbN4v_expf_sse4)
movzbl %r12b, %r15d
movss 192(%rsp,%r15,8), %xmm0
- call expf@PLT
+ call JUMPTARGET(expf)
movss %xmm0, 256(%rsp,%r15,8)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expf8_core_avx2.S
index c6be6954f7..a80a9ec490 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_expf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expf8_core_avx2.S
@@ -184,7 +184,7 @@ ENTRY(_ZGVdN8v_expf_avx2)
vmovss 324(%rsp,%r15,8), %xmm0
vzeroupper
- call expf@PLT
+ call JUMPTARGET(expf)
vmovss %xmm0, 388(%rsp,%r15,8)
jmp .LBL_1_8
@@ -194,7 +194,7 @@ ENTRY(_ZGVdN8v_expf_avx2)
vmovss 320(%rsp,%r15,8), %xmm0
vzeroupper
- call expf@PLT
+ call JUMPTARGET(expf)
vmovss %xmm0, 384(%rsp,%r15,8)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_logf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_logf16_core_avx512.S
index 6209058381..8d57e65bb7 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_logf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_logf16_core_avx512.S
@@ -197,14 +197,14 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_logf
cfi_restore_state
movzbl %r12b, %r15d
vmovss 1156(%rsp,%r15,8), %xmm0
- call logf@PLT
+ call JUMPTARGET(logf)
vmovss %xmm0, 1220(%rsp,%r15,8)
jmp .LBL_1_8
.LBL_1_12:
movzbl %r12b, %r15d
vmovss 1152(%rsp,%r15,8), %xmm0
- call logf@PLT
+ call JUMPTARGET(logf)
vmovss %xmm0, 1216(%rsp,%r15,8)
jmp .LBL_1_7
#endif
@@ -391,7 +391,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_logf
vzeroupper
vmovss 1156(%rsp,%r15,8), %xmm0
- call logf@PLT
+ call JUMPTARGET(logf)
vmovss %xmm0, 1220(%rsp,%r15,8)
jmp .LBL_2_8
@@ -402,7 +402,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_logf
vzeroupper
vmovss 1152(%rsp,%r15,8), %xmm0
- call logf@PLT
+ call JUMPTARGET(logf)
vmovss %xmm0, 1216(%rsp,%r15,8)
jmp .LBL_2_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_logf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_logf4_core_sse4.S
index 1ce9838513..22310f1c04 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_logf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_logf4_core_sse4.S
@@ -177,7 +177,7 @@ ENTRY (_ZGVbN4v_logf_sse4)
movzbl %r12b, %r15d
movss 196(%rsp,%r15,8), %xmm0
- call logf@PLT
+ call JUMPTARGET(logf)
movss %xmm0, 260(%rsp,%r15,8)
jmp .LBL_1_8
@@ -186,7 +186,7 @@ ENTRY (_ZGVbN4v_logf_sse4)
movzbl %r12b, %r15d
movss 192(%rsp,%r15,8), %xmm0
- call logf@PLT
+ call JUMPTARGET(logf)
movss %xmm0, 256(%rsp,%r15,8)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_logf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_logf8_core_avx2.S
index 91fb549ce6..b69d53f93c 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_logf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_logf8_core_avx2.S
@@ -166,7 +166,7 @@ ENTRY(_ZGVdN8v_logf_avx2)
vmovss 324(%rsp,%r15,8), %xmm0
vzeroupper
- call logf@PLT
+ call JUMPTARGET(logf)
vmovss %xmm0, 388(%rsp,%r15,8)
jmp .LBL_1_8
@@ -176,7 +176,7 @@ ENTRY(_ZGVdN8v_logf_avx2)
vmovss 320(%rsp,%r15,8), %xmm0
vzeroupper
- call logf@PLT
+ call JUMPTARGET(logf)
vmovss %xmm0, 384(%rsp,%r15,8)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S
index 45d48723af..299e6ae236 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S
@@ -344,7 +344,7 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN8vv_powf
movzbl %r12b, %r15d
vmovss 1156(%rsp,%r15,8), %xmm0
vmovss 1220(%rsp,%r15,8), %xmm1
- call powf@PLT
+ call JUMPTARGET(powf)
vmovss %xmm0, 1284(%rsp,%r15,8)
jmp .LBL_1_8
@@ -352,7 +352,7 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN8vv_powf
movzbl %r12b, %r15d
vmovss 1152(%rsp,%r15,8), %xmm0
vmovss 1216(%rsp,%r15,8), %xmm1
- call powf@PLT
+ call JUMPTARGET(powf)
vmovss %xmm0, 1280(%rsp,%r15,8)
jmp .LBL_1_7
#endif
@@ -629,7 +629,7 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN8vv_powf
vmovss 1156(%rsp,%r15,8), %xmm1
vzeroupper
vmovss 1092(%rsp,%r15,8), %xmm0
- call powf@PLT
+ call JUMPTARGET(powf)
vmovss %xmm0, 1220(%rsp,%r15,8)
jmp .LBL_2_8
@@ -638,7 +638,7 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN8vv_powf
vmovss 1152(%rsp,%r15,8), %xmm1
vzeroupper
vmovss 1088(%rsp,%r15,8), %xmm0
- call powf@PLT
+ call JUMPTARGET(powf)
vmovss %xmm0, 1216(%rsp,%r15,8)
jmp .LBL_2_7
#endif
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_powf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_powf4_core_sse4.S
index 420f98c6a6..04b4e3d1a1 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_powf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_powf4_core_sse4.S
@@ -356,7 +356,7 @@ ENTRY (_ZGVbN4vv_powf_sse4)
movss 68(%rsp,%r15,8), %xmm0
movss 132(%rsp,%r15,8), %xmm1
- call powf@PLT
+ call JUMPTARGET(powf)
movss %xmm0, 196(%rsp,%r15,8)
jmp .LBL_1_8
@@ -366,7 +366,7 @@ ENTRY (_ZGVbN4vv_powf_sse4)
movss 64(%rsp,%r15,8), %xmm0
movss 128(%rsp,%r15,8), %xmm1
- call powf@PLT
+ call JUMPTARGET(powf)
movss %xmm0, 192(%rsp,%r15,8)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_powf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_powf8_core_avx2.S
index 4446859130..bfe2229348 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_powf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_powf8_core_avx2.S
@@ -317,7 +317,7 @@ ENTRY(_ZGVdN8vv_powf_avx2)
vmovss 132(%rsp,%r15,8), %xmm1
vzeroupper
- call powf@PLT
+ call JUMPTARGET(powf)
vmovss %xmm0, 196(%rsp,%r15,8)
jmp .LBL_1_8
@@ -328,7 +328,7 @@ ENTRY(_ZGVdN8vv_powf_avx2)
vmovss 128(%rsp,%r15,8), %xmm1
vzeroupper
- call powf@PLT
+ call JUMPTARGET(powf)
vmovss %xmm0, 192(%rsp,%r15,8)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
index 758aeeaeed..e375de8970 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
@@ -243,12 +243,12 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
movzbl %r12b, %r15d
vmovss 1156(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 1220(%rsp,%r15,8)
vmovss 1156(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 1284(%rsp,%r15,8)
jmp .LBL_1_8
@@ -257,12 +257,12 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
movzbl %r12b, %r15d
vmovss 1152(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 1216(%rsp,%r15,8)
vmovss 1152(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 1280(%rsp,%r15,8)
jmp .LBL_1_7
@@ -470,12 +470,12 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
vzeroupper
vmovss 1156(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 1220(%rsp,%r15,8)
vmovss 1156(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 1284(%rsp,%r15,8)
jmp .LBL_2_8
@@ -486,12 +486,12 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
vzeroupper
vmovss 1152(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 1216(%rsp,%r15,8)
vmovss 1152(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 1280(%rsp,%r15,8)
jmp .LBL_2_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S
index 643fc0ca3b..562367b136 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S
@@ -241,12 +241,12 @@ ENTRY (_ZGVbN4vvv_sincosf_sse4)
movzbl %r12b, %r15d
movss 132(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
movss %xmm0, 196(%rsp,%r15,8)
movss 132(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
movss %xmm0, 260(%rsp,%r15,8)
jmp .LBL_1_8
@@ -255,12 +255,12 @@ ENTRY (_ZGVbN4vvv_sincosf_sse4)
movzbl %r12b, %r15d
movss 128(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
movss %xmm0, 192(%rsp,%r15,8)
movss 128(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
movss %xmm0, 256(%rsp,%r15,8)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S
index f2a0ba7116..baf887dd0a 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S
@@ -213,12 +213,12 @@ ENTRY(_ZGVdN8vvv_sincosf_avx2)
vmovss 260(%rsp,%r15,8), %xmm0
vzeroupper
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 324(%rsp,%r15,8)
vmovss 260(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 388(%rsp,%r15,8)
jmp .LBL_1_8
@@ -228,12 +228,12 @@ ENTRY(_ZGVdN8vvv_sincosf_avx2)
vmovss 256(%rsp,%r15,8), %xmm0
vzeroupper
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 320(%rsp,%r15,8)
vmovss 256(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 384(%rsp,%r15,8)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S
index 61d8d3793a..121714fbd3 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S
@@ -229,14 +229,14 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_sinf
cfi_restore_state
movzbl %r12b, %r15d
vmovss 1156(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 1220(%rsp,%r15,8)
jmp .LBL_1_8
.LBL_1_12:
movzbl %r12b, %r15d
vmovss 1152(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 1216(%rsp,%r15,8)
jmp .LBL_1_7
#endif
@@ -455,7 +455,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_sinf
vzeroupper
vmovss 1156(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 1220(%rsp,%r15,8)
jmp .LBL_2_8
@@ -466,7 +466,7 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_sinf
vzeroupper
vmovss 1152(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 1216(%rsp,%r15,8)
jmp .LBL_2_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinf4_core_sse4.S
index 5268ab1f09..a528d10014 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinf4_core_sse4.S
@@ -207,7 +207,7 @@ ENTRY(_ZGVbN4v_sinf_sse4)
movzbl %r12b, %r15d
movss 196(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
movss %xmm0, 260(%rsp,%r15,8)
jmp .LBL_1_8
@@ -216,7 +216,7 @@ ENTRY(_ZGVbN4v_sinf_sse4)
movzbl %r12b, %r15d
movss 192(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
movss %xmm0, 256(%rsp,%r15,8)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinf8_core_avx2.S
index 9fdaadb2e8..425ac7b51e 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinf8_core_avx2.S
@@ -201,7 +201,7 @@ ENTRY(_ZGVdN8v_sinf_avx2)
vmovss 324(%rsp,%r15,8), %xmm0
vzeroupper
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 388(%rsp,%r15,8)
jmp .LBL_1_8
@@ -211,7 +211,7 @@ ENTRY(_ZGVdN8v_sinf_avx2)
vmovss 320(%rsp,%r15,8), %xmm0
vzeroupper
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 384(%rsp,%r15,8)
jmp .LBL_1_7
diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
index 54f4f58371..08206bf836 100644
--- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
@@ -21,10 +21,10 @@
subq $40, %rsp
cfi_adjust_cfa_offset(40)
movaps %xmm0, (%rsp)
- call \callee@PLT
+ call JUMPTARGET(\callee)
movsd %xmm0, 16(%rsp)
movsd 8(%rsp), %xmm0
- call \callee@PLT
+ call JUMPTARGET(\callee)
movsd 16(%rsp), %xmm1
movsd %xmm0, 24(%rsp)
unpcklpd %xmm0, %xmm1
@@ -40,11 +40,11 @@
cfi_adjust_cfa_offset(56)
movaps %xmm0, (%rsp)
movaps %xmm1, 16(%rsp)
- call \callee@PLT
+ call JUMPTARGET(\callee)
movsd %xmm0, 32(%rsp)
movsd 8(%rsp), %xmm0
movsd 24(%rsp), %xmm1
- call \callee@PLT
+ call JUMPTARGET(\callee)
movsd 32(%rsp), %xmm1
movsd %xmm0, 40(%rsp)
unpcklpd %xmm0, %xmm1
@@ -69,7 +69,7 @@
leaq 16(%rsp), %rsi
leaq 24(%rsp), %rdi
movaps %xmm0, (%rsp)
- call \callee@PLT
+ call JUMPTARGET(\callee)
leaq 16(%rsp), %rsi
leaq 24(%rsp), %rdi
movsd 24(%rsp), %xmm0
@@ -79,7 +79,7 @@
movsd 16(%rsp), %xmm0
movsd %xmm0, (%rbx)
movapd %xmm1, %xmm0
- call \callee@PLT
+ call JUMPTARGET(\callee)
movsd 24(%rsp), %xmm0
movsd %xmm0, 8(%rbp)
movsd 16(%rsp), %xmm0
diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
index b1a03be3d9..6c64053dad 100644
--- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
@@ -21,16 +21,16 @@
subq $40, %rsp
cfi_adjust_cfa_offset(40)
movaps %xmm0, (%rsp)
- call \callee@PLT
+ call JUMPTARGET(\callee)
movss %xmm0, 16(%rsp)
movss 4(%rsp), %xmm0
- call \callee@PLT
+ call JUMPTARGET(\callee)
movss %xmm0, 20(%rsp)
movss 8(%rsp), %xmm0
- call \callee@PLT
+ call JUMPTARGET(\callee)
movss %xmm0, 24(%rsp)
movss 12(%rsp), %xmm0
- call \callee@PLT
+ call JUMPTARGET(\callee)
movss 16(%rsp), %xmm3
movss 20(%rsp), %xmm2
movss 24(%rsp), %xmm1
@@ -50,19 +50,19 @@
cfi_adjust_cfa_offset(56)
movaps %xmm0, (%rsp)
movaps %xmm1, 16(%rsp)
- call \callee@PLT
+ call JUMPTARGET(\callee)
movss %xmm0, 32(%rsp)
movss 4(%rsp), %xmm0
movss 20(%rsp), %xmm1
- call \callee@PLT
+ call JUMPTARGET(\callee)
movss %xmm0, 36(%rsp)
movss 8(%rsp), %xmm0
movss 24(%rsp), %xmm1
- call \callee@PLT
+ call JUMPTARGET(\callee)
movss %xmm0, 40(%rsp)
movss 12(%rsp), %xmm0
movss 28(%rsp), %xmm1
- call \callee@PLT
+ call JUMPTARGET(\callee)
movss 32(%rsp), %xmm3
movss 36(%rsp), %xmm2
movss 40(%rsp), %xmm1
@@ -91,7 +91,7 @@
leaq 24(%rsp), %rsi
leaq 28(%rsp), %rdi
movaps %xmm0, (%rsp)
- call \callee@PLT
+ call JUMPTARGET(\callee)
leaq 24(%rsp), %rsi
leaq 28(%rsp), %rdi
movss 28(%rsp), %xmm0
@@ -101,7 +101,7 @@
movss %xmm0, (%rbx)
movaps %xmm1, %xmm0
shufps $85, %xmm1, %xmm0
- call \callee@PLT
+ call JUMPTARGET(\callee)
movss 28(%rsp), %xmm0
leaq 24(%rsp), %rsi
movss %xmm0, 4(%rbp)
@@ -111,7 +111,7 @@
movss %xmm0, 4(%rbx)
movaps %xmm1, %xmm0
unpckhps %xmm1, %xmm0
- call \callee@PLT
+ call JUMPTARGET(\callee)
movaps (%rsp), %xmm1
leaq 24(%rsp), %rsi
leaq 28(%rsp), %rdi
@@ -121,7 +121,7 @@
movss 24(%rsp), %xmm0
movss %xmm0, 8(%rbx)
movaps %xmm1, %xmm0
- call \callee@PLT
+ call JUMPTARGET(\callee)
movss 28(%rsp), %xmm0
movss %xmm0, 12(%rbp)
movss 24(%rsp), %xmm0