/* Wrapper implementations of vector math functions.
Copyright (C) 2014-2022 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/* SSE2 ISA version as wrapper to scalar. */
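/* The vector variant receives four packed floats in %xmm0.  In rough
   C-like pseudocode (illustration only, not part of the build):

       __m128 wrapper (__m128 x)
       {
         __m128 r;
         for (int i = 0; i < 4; i++)
           r[i] = callee (x[i]);
         return r;
       }

   The input vector is spilled to the stack, the scalar callee is called
   once per element, and the four scalar results are merged back into
   %xmm0.  The third result is kept in %ebx across the final call, which
   is why %rbx is saved and restored.  */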
.macro WRAPPER_IMPL_SSE2 callee
push %rbx
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbx, 0)
subq $16, %rsp
cfi_adjust_cfa_offset (16)
movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
movss %xmm0, (%rsp)
movss 4(%rsp), %xmm0
call JUMPTARGET(\callee)
movss %xmm0, 4(%rsp)
movss 8(%rsp), %xmm0
call JUMPTARGET(\callee)
movd %xmm0, %ebx
movss 12(%rsp), %xmm0
call JUMPTARGET(\callee)
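/* merge 4x results into xmm0.  */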
movd %ebx, %xmm1
unpcklps %xmm0, %xmm1
movsd (%rsp), %xmm0
unpcklpd %xmm1, %xmm0
addq $16, %rsp
cfi_adjust_cfa_offset (-16)
popq %rbx
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbx)
ret
.endm
/* 2 argument SSE2 ISA version as wrapper to scalar. */
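/* Both four-float arguments are spilled to the stack at (%rsp) and
   16(%rsp); before each scalar call the matching lanes are loaded into
   %xmm0 and %xmm1 (for the first call they are still in place from the
   caller).  */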
.macro WRAPPER_IMPL_SSE2_ff callee
push %rbx
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbx, 0)
subq $32, %rsp
cfi_adjust_cfa_offset (32)
movaps %xmm0, (%rsp)
movaps %xmm1, 16(%rsp)
call JUMPTARGET(\callee)
movss 20(%rsp), %xmm1
movss %xmm0, 0(%rsp)
movss 4(%rsp), %xmm0
call JUMPTARGET(\callee)
movss 24(%rsp), %xmm1
movss %xmm0, 4(%rsp)
movss 8(%rsp), %xmm0
call JUMPTARGET(\callee)
movss 28(%rsp), %xmm1
movd %xmm0, %ebx
movss 12(%rsp), %xmm0
call JUMPTARGET(\callee)
/* merge 4x results into xmm0. */
movd %ebx, %xmm1
unpcklps %xmm0, %xmm1
movsd (%rsp), %xmm0
unpcklpd %xmm1, %xmm0
addq $32, %rsp
cfi_adjust_cfa_offset (-32)
popq %rbx
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbx)
ret
.endm
/* 3 argument SSE2 ISA version as wrapper to scalar. */
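/* Used for callees that take one float in %xmm0 and store two results
   through the pointers in %rdi and %rsi (sincosf-style).  The two array
   pointers are preserved in %rbp and %rbx and advanced by 4 bytes for
   each element.  */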
.macro WRAPPER_IMPL_SSE2_fFF callee
pushq %rbp
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
pushq %rbx
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbx, 0)
movq %rdi, %rbp
movq %rsi, %rbx
subq $24, %rsp
cfi_adjust_cfa_offset (24)
movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
movss 4(%rsp), %xmm0
leaq 4(%rbp), %rdi
leaq 4(%rbx), %rsi
call JUMPTARGET(\callee)
movss 8(%rsp), %xmm0
leaq 8(%rbp), %rdi
leaq 8(%rbx), %rsi
call JUMPTARGET(\callee)
movss 12(%rsp), %xmm0
leaq 12(%rbp), %rdi
leaq 12(%rbx), %rsi
call JUMPTARGET(\callee)
addq $24, %rsp
cfi_adjust_cfa_offset (-24)
popq %rbx
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbx)
popq %rbp
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbp)
ret
.endm
/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
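/* The eight-float input in %ymm0 is saved to the stack and handled as
   two four-float halves by the SSE ISA version.  vzeroupper is used
   before calling into SSE code to avoid AVX/SSE transition penalties.  */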
.macro WRAPPER_IMPL_AVX callee
pushq %rbp
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-32, %rsp
subq $32, %rsp
vmovaps %ymm0, (%rsp)
vzeroupper
call HIDDEN_JUMPTARGET(\callee)
vmovaps %xmm0, (%rsp)
vmovaps 16(%rsp), %xmm0
call HIDDEN_JUMPTARGET(\callee)
/* combine xmm0 (return of second call) with result of first
call (saved on stack). Might be worth exploring logic that
uses `vpblend` and reads in ymm1 using -16(rsp). */
vmovaps (%rsp), %xmm1
vinsertf128 $1, %xmm0, %ymm1, %ymm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbp)
ret
.endm
/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
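/* Same splitting scheme as above; both eight-float arguments are saved
   so that their upper halves can be reloaded into %xmm0 and %xmm1 for
   the second call.  */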
.macro WRAPPER_IMPL_AVX_ff callee
pushq %rbp
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-32, %rsp
subq $64, %rsp
vmovaps %ymm0, (%rsp)
vmovaps %ymm1, 32(%rsp)
vzeroupper
call HIDDEN_JUMPTARGET(\callee)
vmovaps 48(%rsp), %xmm1
vmovaps %xmm0, (%rsp)
vmovaps 16(%rsp), %xmm0
call HIDDEN_JUMPTARGET(\callee)
/* combine xmm0 (return of second call) with result of first
call (saved on stack). Might be worth exploring logic that
uses `vpblend` and reads in ymm1 using -16(rsp). */
vmovaps (%rsp), %xmm1
vinsertf128 $1, %xmm0, %ymm1, %ymm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbp)
ret
.endm
/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
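/* Combines the two schemes above: the eight-float input is processed as
   two four-float halves, and the output-array pointers are preserved in
   %rbx and %r14 and advanced by 16 bytes (four floats) for the second
   call.  */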
.macro WRAPPER_IMPL_AVX_fFF callee
pushq %rbp
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-32, %rsp
subq $32, %rsp
vmovaps %ymm0, (%rsp)
pushq %rbx
pushq %r14
movq %rdi, %rbx
movq %rsi, %r14
vzeroupper
call HIDDEN_JUMPTARGET(\callee)
vmovaps 32(%rsp), %xmm0
leaq 16(%rbx), %rdi
leaq 16(%r14), %rsi
call HIDDEN_JUMPTARGET(\callee)
popq %r14
popq %rbx
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbp)
ret
.endm
/* AVX512 ISA version as wrapper to AVX2 ISA version. */
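/* The sixteen-float input in %zmm0 is saved to the stack and handled as
   two eight-float halves by the AVX2 ISA version; the low half is
   already in %ymm0 for the first call.  */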
.macro WRAPPER_IMPL_AVX512 callee
pushq %rbp
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
subq $64, %rsp
vmovups %zmm0, (%rsp)
call HIDDEN_JUMPTARGET(\callee)
vmovupd %ymm0, (%rsp)
vmovupd 32(%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
/* combine ymm0 (return of second call) with result of first
call (saved on stack). */
vmovaps (%rsp), %ymm1
vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbp)
ret
.endm
/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
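/* 128 bytes of stack hold both sixteen-float arguments; the upper
   eight-float halves are reloaded into %ymm0 and %ymm1 for the second
   call.  */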
.macro WRAPPER_IMPL_AVX512_ff callee
pushq %rbp
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
addq $-128, %rsp
vmovups %zmm0, (%rsp)
vmovups %zmm1, 64(%rsp)
/* ymm0 and ymm1 are already set. */
call HIDDEN_JUMPTARGET(\callee)
vmovups 96(%rsp), %ymm1
vmovaps %ymm0, (%rsp)
vmovups 32(%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
/* combine ymm0 (return of second call) with result of first
call (saved on stack). */
vmovaps (%rsp), %ymm1
vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbp)
ret
.endm
/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
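/* The sixteen-float input is processed as two eight-float halves; the
   output-array pointers are preserved in %rbx and %r14 and advanced by
   32 bytes (eight floats) for the second call.  */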
.macro WRAPPER_IMPL_AVX512_fFF callee
pushq %rbp
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
subq $64, %rsp
vmovaps %zmm0, (%rsp)
pushq %rbx
pushq %r14
movq %rdi, %rbx
movq %rsi, %r14
/* ymm0 is already set. */
call HIDDEN_JUMPTARGET(\callee)
vmovaps 48(%rsp), %ymm0
leaq 32(%rbx), %rdi
leaq 32(%r14), %rsi
call HIDDEN_JUMPTARGET(\callee)
popq %r14
popq %rbx
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbp)
ret
.endm