path: root/sysdeps/x86_64/fpu/multiarch
author     Andrew Senkevich <andrew.senkevich@intel.com>   2015-06-18 20:11:27 +0300
committer  Andrew Senkevich <andrew.senkevich@intel.com>   2015-06-18 20:11:27 +0300
commit     a6336cc446a7ed682cb9dbc47cc56ebf9f9a4229 (patch)
tree       3b89c96ee406327a8ad942cb1f4923fe33c0558e /sysdeps/x86_64/fpu/multiarch
parent     c9a8c526acd185176e486bee4624039740f8c435 (diff)
Vector sincosf for x86_64 and tests.
Here is an implementation of vectorized sincosf containing SSE, AVX, AVX2 and AVX512 versions according to the Vector ABI <https://groups.google.com/forum/#!topic/x86-64-abi/LmppCfN1rZ4>.

* NEWS: Mention addition of x86_64 vector sincosf.
* math/test-float-vlen16.h: Added wrapper for sincosf tests.
* math/test-float-vlen4.h: Likewise.
* math/test-float-vlen8.h: Likewise.
* sysdeps/unix/sysv/linux/x86_64/libmvec.abilist: New symbols added.
* sysdeps/x86/fpu/bits/math-vector.h: Added sincosf SIMD declaration.
* sysdeps/x86_64/fpu/Makefile (libmvec-support): Added new files.
* sysdeps/x86_64/fpu/Versions: New versions added.
* sysdeps/x86_64/fpu/libm-test-ulps: Regenerated.
* sysdeps/x86_64/fpu/multiarch/Makefile (libmvec-sysdep_routines): Added build of SSE, AVX2 and AVX512 IFUNC versions.
* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core.S
* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core.S
* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S
* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core.S
* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S
* sysdeps/x86_64/fpu/svml_s_sincosf16_core.S
* sysdeps/x86_64/fpu/svml_s_sincosf4_core.S
* sysdeps/x86_64/fpu/svml_s_sincosf8_core.S
* sysdeps/x86_64/fpu/svml_s_sincosf8_core_avx.S
* sysdeps/x86_64/fpu/svml_s_sincosf_data.S: New file.
* sysdeps/x86_64/fpu/svml_s_sincosf_data.h: New file.
* sysdeps/x86_64/fpu/svml_s_wrapper_impl.h: Added 3 argument wrappers.
* sysdeps/x86_64/fpu/test-float-vlen16.c: Vector sincosf tests.
* sysdeps/x86_64/fpu/test-float-vlen16-wrappers.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen4-wrappers.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen4.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen8-avx2-wrappers.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen8-avx2.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen8-wrappers.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen8.c: Likewise.
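For context, a minimal caller sketch (not part of this patch): with the SIMD declaration this change adds to bits/math-vector.h, a loop over sincosf becomes a candidate for auto-vectorization into the _ZGVbN4vvv_sincosf / _ZGVdN8vvv_sincosf / _ZGVeN16vvv_sincosf entry points. Whether the transformation actually happens depends on the compiler version and flags; the array names below are illustrative, not from the patch.

#define _GNU_SOURCE
#include <math.h>

#define N 1024
float angle[N], s[N], c[N];   /* illustrative data */

void
compute_sincos (void)
{
  /* With a compiler that understands the OpenMP SIMD declarations
     (e.g. -O2 -ffast-math, or -fopenmp-simd plus "#pragma omp simd"),
     this loop may be vectorized against the libmvec variants added
     by this commit.  */
  for (int i = 0; i < N; i++)
    sincosf (angle[i], &s[i], &c[i]);
}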
Diffstat (limited to 'sysdeps/x86_64/fpu/multiarch')
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/Makefile                           3
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core.S           39
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S   504
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core.S            38
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S      268
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core.S            38
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S      241
7 files changed, 1130 insertions, 1 deletions
diff --git a/sysdeps/x86_64/fpu/multiarch/Makefile b/sysdeps/x86_64/fpu/multiarch/Makefile
index 9e510db365..86ea473b4f 100644
--- a/sysdeps/x86_64/fpu/multiarch/Makefile
+++ b/sysdeps/x86_64/fpu/multiarch/Makefile
@@ -69,5 +69,6 @@ libmvec-sysdep_routines += svml_d_cos2_core_sse4 svml_d_cos4_core_avx2 \
svml_s_expf16_core_avx512 svml_d_pow2_core_sse4 \
svml_d_pow4_core_avx2 svml_d_pow8_core_avx512 \
svml_s_powf4_core_sse4 svml_s_powf8_core_avx2 \
- svml_s_powf16_core_avx512
+ svml_s_powf16_core_avx512 svml_s_sincosf4_core_sse4 \
+ svml_s_sincosf8_core_avx2 svml_s_sincosf16_core_avx512
endif
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core.S
new file mode 100644
index 0000000000..0a1753eab7
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core.S
@@ -0,0 +1,39 @@
+/* Multiple versions of vectorized sincosf.
+ Copyright (C) 2014-2015 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <init-arch.h>
+
+ .text
+ENTRY (_ZGVeN16vvv_sincosf)
+ .type _ZGVeN16vvv_sincosf, @gnu_indirect_function
+ cmpl $0, KIND_OFFSET+__cpu_features(%rip)
+ jne 1f
+ call __init_cpu_features
+1: leaq _ZGVeN16vvv_sincosf_skx(%rip), %rax
+ testl $bit_AVX512DQ_Usable, __cpu_features+FEATURE_OFFSET+index_AVX512DQ_Usable(%rip)
+ jnz 3f
+2: leaq _ZGVeN16vvv_sincosf_knl(%rip), %rax
+ testl $bit_AVX512F_Usable, __cpu_features+FEATURE_OFFSET+index_AVX512F_Usable(%rip)
+ jnz 3f
+ leaq _ZGVeN16vvv_sincosf_avx2_wrapper(%rip), %rax
+3: ret
+END (_ZGVeN16vvv_sincosf)
+
+#define _ZGVeN16vvv_sincosf _ZGVeN16vvv_sincosf_avx2_wrapper
+#include "../svml_s_sincosf16_core.S"
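The IFUNC resolver above selects among the SKX, KNL and AVX2-wrapper implementations. A rough C rendering of that selection (illustrative only: the real code tests the AVX512DQ_Usable / AVX512F_Usable bits in __cpu_features directly in assembly, and the flag parameters below are stand-ins):

extern void _ZGVeN16vvv_sincosf_skx (void);
extern void _ZGVeN16vvv_sincosf_knl (void);
extern void _ZGVeN16vvv_sincosf_avx2_wrapper (void);

typedef void (*sincosf16_fn) (void);

/* Pick SKX when AVX512DQ is usable, KNL when only AVX512F is usable,
   otherwise fall back to the AVX2 wrapper.  */
static sincosf16_fn
select_sincosf16 (int avx512dq_usable, int avx512f_usable)
{
  if (avx512dq_usable)
    return _ZGVeN16vvv_sincosf_skx;
  if (avx512f_usable)
    return _ZGVeN16vvv_sincosf_knl;
  return _ZGVeN16vvv_sincosf_avx2_wrapper;
}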
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
new file mode 100644
index 0000000000..cae49f63a6
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
@@ -0,0 +1,504 @@
+/* Function sincosf vectorized with AVX-512. KNL and SKX versions.
+ Copyright (C) 2014-2015 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include "svml_s_sincosf_data.h"
+#include "svml_s_wrapper_impl.h"
+
+/*
+ ALGORITHM DESCRIPTION:
+
+ 1) Range reduction to [-Pi/4; +Pi/4] interval
+ a) Grab sign from source argument and save it.
+ b) Remove sign using AND operation
+ c) Getting octant Y by 2/Pi multiplication
+ d) Add "Right Shifter" value
+ e) Treat obtained value as integer S for destination sign setting.
+ SS = ((S-S&1)&2)<<30; For sin part
+ SC = ((S+S&1)&2)<<30; For cos part
+ f) Change destination sign if source sign is negative
+ using XOR operation.
+ g) Subtract "Right Shifter" (0x4B000000) value
+ h) Subtract Y*(PI/2) from X argument, where PI/2 divided to 4 parts:
+ X = X - Y*PI1 - Y*PI2 - Y*PI3 - Y*PI4;
+ 2) Polynomial (minimax for sin within [-Pi/4; +Pi/4] interval)
+ a) Calculate X^2 = X * X
+ b) Calculate 2 polynomials for sin and cos:
+ RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
+ RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4))));
+ c) Swap RS & RC if the first bit of the obtained value after
+ Right Shifting is set to 1. Using And, Andnot & Or operations.
+ 3) Destination sign setting
+ a) Set shifted destination sign using XOR operation:
+ R1 = XOR( RS, SS );
+ R2 = XOR( RC, SC ). */
+
+ .text
+ENTRY (_ZGVeN16vvv_sincosf_knl)
+#ifndef HAVE_AVX512_ASM_SUPPORT
+WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
+#else
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $1344, %rsp
+ movq __svml_ssincos_data@GOTPCREL(%rip), %rax
+ vmovaps %zmm0, %zmm2
+ movl $-1, %edx
+ vmovups __sAbsMask(%rax), %zmm0
+ vmovups __sInvPI(%rax), %zmm3
+
+/* Absolute argument computation */
+ vpandd %zmm0, %zmm2, %zmm1
+ vmovups __sPI1_FMA(%rax), %zmm5
+ vmovups __sSignMask(%rax), %zmm9
+ vpandnd %zmm2, %zmm0, %zmm0
+
+/* h) Subtract Y*(PI/2) from X argument, where PI/2 divided to 3 parts:
+ X = X - Y*PI1 - Y*PI2 - Y*PI3 */
+ vmovaps %zmm1, %zmm6
+ vmovaps %zmm1, %zmm8
+
+/* c) Getting octant Y by 2/Pi multiplication
+ d) Add "Right Shifter" value */
+ vfmadd213ps __sRShifter(%rax), %zmm1, %zmm3
+ vmovups __sPI3_FMA(%rax), %zmm7
+
+/* g) Subtract "Right Shifter" (0x4B000000) value */
+ vsubps __sRShifter(%rax), %zmm3, %zmm12
+
+/* e) Treat obtained value as integer S for destination sign setting */
+ vpslld $31, %zmm3, %zmm13
+ vmovups __sA7_FMA(%rax), %zmm14
+ vfnmadd231ps %zmm12, %zmm5, %zmm6
+
+/* 2) Polynomial (minimax for sin within [-Pi/4; +Pi/4] interval)
+ a) Calculate X^2 = X * X
+ b) Calculate 2 polynomials for sin and cos:
+ RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
+ RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4)))) */
+ vmovaps %zmm14, %zmm15
+ vmovups __sA9_FMA(%rax), %zmm3
+ vcmpps $22, __sRangeReductionVal(%rax), %zmm1, %k1
+ vpbroadcastd %edx, %zmm1{%k1}{z}
+ vfnmadd231ps __sPI2_FMA(%rax), %zmm12, %zmm6
+ vptestmd %zmm1, %zmm1, %k0
+ vpandd %zmm6, %zmm9, %zmm11
+ kmovw %k0, %ecx
+ vpxord __sOneHalf(%rax), %zmm11, %zmm4
+
+/* Result sign calculations */
+ vpternlogd $150, %zmm13, %zmm9, %zmm11
+
+/* Add correction term 0.5 for cos() part */
+ vaddps %zmm4, %zmm12, %zmm10
+ vfnmadd213ps %zmm6, %zmm7, %zmm12
+ vfnmadd231ps %zmm10, %zmm5, %zmm8
+ vpxord %zmm13, %zmm12, %zmm13
+ vmulps %zmm13, %zmm13, %zmm12
+ vfnmadd231ps __sPI2_FMA(%rax), %zmm10, %zmm8
+ vfmadd231ps __sA9_FMA(%rax), %zmm12, %zmm15
+ vfnmadd213ps %zmm8, %zmm7, %zmm10
+ vfmadd213ps __sA5_FMA(%rax), %zmm12, %zmm15
+ vpxord %zmm11, %zmm10, %zmm5
+ vmulps %zmm5, %zmm5, %zmm4
+ vfmadd213ps __sA3(%rax), %zmm12, %zmm15
+ vfmadd213ps %zmm14, %zmm4, %zmm3
+ vmulps %zmm12, %zmm15, %zmm14
+ vfmadd213ps __sA5_FMA(%rax), %zmm4, %zmm3
+ vfmadd213ps %zmm13, %zmm13, %zmm14
+ vfmadd213ps __sA3(%rax), %zmm4, %zmm3
+ vpxord %zmm0, %zmm14, %zmm0
+ vmulps %zmm4, %zmm3, %zmm3
+ vfmadd213ps %zmm5, %zmm5, %zmm3
+ testl %ecx, %ecx
+ jne .LBL_1_3
+
+.LBL_1_2:
+ cfi_remember_state
+ vmovups %zmm0, (%rdi)
+ vmovups %zmm3, (%rsi)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+
+.LBL_1_3:
+ cfi_restore_state
+ vmovups %zmm2, 1152(%rsp)
+ vmovups %zmm0, 1216(%rsp)
+ vmovups %zmm3, 1280(%rsp)
+ je .LBL_1_2
+
+ xorb %dl, %dl
+ kmovw %k4, 1048(%rsp)
+ xorl %eax, %eax
+ kmovw %k5, 1040(%rsp)
+ kmovw %k6, 1032(%rsp)
+ kmovw %k7, 1024(%rsp)
+ vmovups %zmm16, 960(%rsp)
+ vmovups %zmm17, 896(%rsp)
+ vmovups %zmm18, 832(%rsp)
+ vmovups %zmm19, 768(%rsp)
+ vmovups %zmm20, 704(%rsp)
+ vmovups %zmm21, 640(%rsp)
+ vmovups %zmm22, 576(%rsp)
+ vmovups %zmm23, 512(%rsp)
+ vmovups %zmm24, 448(%rsp)
+ vmovups %zmm25, 384(%rsp)
+ vmovups %zmm26, 320(%rsp)
+ vmovups %zmm27, 256(%rsp)
+ vmovups %zmm28, 192(%rsp)
+ vmovups %zmm29, 128(%rsp)
+ vmovups %zmm30, 64(%rsp)
+ vmovups %zmm31, (%rsp)
+ movq %rsi, 1056(%rsp)
+ movq %r12, 1096(%rsp)
+ cfi_offset_rel_rsp (12, 1096)
+ movb %dl, %r12b
+ movq %r13, 1088(%rsp)
+ cfi_offset_rel_rsp (13, 1088)
+ movl %eax, %r13d
+ movq %r14, 1080(%rsp)
+ cfi_offset_rel_rsp (14, 1080)
+ movl %ecx, %r14d
+ movq %r15, 1072(%rsp)
+ cfi_offset_rel_rsp (15, 1072)
+ movq %rbx, 1064(%rsp)
+ movq %rdi, %rbx
+ cfi_remember_state
+
+.LBL_1_6:
+ btl %r13d, %r14d
+ jc .LBL_1_13
+
+.LBL_1_7:
+ lea 1(%r13), %esi
+ btl %esi, %r14d
+ jc .LBL_1_10
+
+.LBL_1_8:
+ addb $1, %r12b
+ addl $2, %r13d
+ cmpb $16, %r12b
+ jb .LBL_1_6
+
+ movq %rbx, %rdi
+ kmovw 1048(%rsp), %k4
+ movq 1056(%rsp), %rsi
+ kmovw 1040(%rsp), %k5
+ movq 1096(%rsp), %r12
+ cfi_restore (%r12)
+ kmovw 1032(%rsp), %k6
+ movq 1088(%rsp), %r13
+ cfi_restore (%r13)
+ kmovw 1024(%rsp), %k7
+ vmovups 960(%rsp), %zmm16
+ vmovups 896(%rsp), %zmm17
+ vmovups 832(%rsp), %zmm18
+ vmovups 768(%rsp), %zmm19
+ vmovups 704(%rsp), %zmm20
+ vmovups 640(%rsp), %zmm21
+ vmovups 576(%rsp), %zmm22
+ vmovups 512(%rsp), %zmm23
+ vmovups 448(%rsp), %zmm24
+ vmovups 384(%rsp), %zmm25
+ vmovups 320(%rsp), %zmm26
+ vmovups 256(%rsp), %zmm27
+ vmovups 192(%rsp), %zmm28
+ vmovups 128(%rsp), %zmm29
+ vmovups 64(%rsp), %zmm30
+ vmovups (%rsp), %zmm31
+ movq 1080(%rsp), %r14
+ cfi_restore (%r14)
+ movq 1072(%rsp), %r15
+ cfi_restore (%r15)
+ movq 1064(%rsp), %rbx
+ vmovups 1216(%rsp), %zmm0
+ vmovups 1280(%rsp), %zmm3
+ jmp .LBL_1_2
+
+.LBL_1_10:
+ cfi_restore_state
+ movzbl %r12b, %r15d
+ vmovss 1156(%rsp,%r15,8), %xmm0
+
+ call sinf@PLT
+
+ vmovss %xmm0, 1220(%rsp,%r15,8)
+ vmovss 1156(%rsp,%r15,8), %xmm0
+
+ call cosf@PLT
+
+ vmovss %xmm0, 1284(%rsp,%r15,8)
+ jmp .LBL_1_8
+
+.LBL_1_13:
+ movzbl %r12b, %r15d
+ vmovss 1152(%rsp,%r15,8), %xmm0
+
+ call sinf@PLT
+
+ vmovss %xmm0, 1216(%rsp,%r15,8)
+ vmovss 1152(%rsp,%r15,8), %xmm0
+
+ call cosf@PLT
+
+ vmovss %xmm0, 1280(%rsp,%r15,8)
+ jmp .LBL_1_7
+#endif
+END (_ZGVeN16vvv_sincosf_knl)
+
+ENTRY (_ZGVeN16vvv_sincosf_skx)
+#ifndef HAVE_AVX512_ASM_SUPPORT
+WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
+#else
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $1344, %rsp
+ movq __svml_ssincos_data@GOTPCREL(%rip), %rax
+ vmovaps %zmm0, %zmm4
+ vmovups __sAbsMask(%rax), %zmm3
+ vmovups __sInvPI(%rax), %zmm5
+ vmovups __sRShifter(%rax), %zmm6
+ vmovups __sPI1_FMA(%rax), %zmm9
+ vmovups __sPI2_FMA(%rax), %zmm10
+ vmovups __sSignMask(%rax), %zmm14
+ vmovups __sOneHalf(%rax), %zmm7
+ vmovups __sPI3_FMA(%rax), %zmm12
+
+/* Absolute argument computation */
+ vandps %zmm3, %zmm4, %zmm2
+
+/* c) Getting octant Y by 2/Pi multiplication
+ d) Add "Right Shifter" value */
+ vfmadd213ps %zmm6, %zmm2, %zmm5
+ vcmpps $18, __sRangeReductionVal(%rax), %zmm2, %k1
+
+/* e) Treat obtained value as integer S for destination sign setting */
+ vpslld $31, %zmm5, %zmm0
+
+/* g) Subtract "Right Shifter" (0x4B000000) value */
+ vsubps %zmm6, %zmm5, %zmm5
+ vmovups __sA3(%rax), %zmm6
+
+/* h) Subtract Y*(PI/2) from X argument, where PI/2 divided to 3 parts:
+ X = X - Y*PI1 - Y*PI2 - Y*PI3 */
+ vmovaps %zmm2, %zmm11
+ vfnmadd231ps %zmm5, %zmm9, %zmm11
+ vfnmadd231ps %zmm5, %zmm10, %zmm11
+ vandps %zmm11, %zmm14, %zmm1
+ vxorps %zmm1, %zmm7, %zmm8
+
+/* Result sign calculations */
+ vpternlogd $150, %zmm0, %zmm14, %zmm1
+ vmovups .L_2il0floatpacket.13(%rip), %zmm14
+
+/* Add correction term 0.5 for cos() part */
+ vaddps %zmm8, %zmm5, %zmm15
+ vfnmadd213ps %zmm11, %zmm12, %zmm5
+ vandnps %zmm4, %zmm3, %zmm11
+ vmovups __sA7_FMA(%rax), %zmm3
+ vmovaps %zmm2, %zmm13
+ vfnmadd231ps %zmm15, %zmm9, %zmm13
+ vxorps %zmm0, %zmm5, %zmm9
+ vmovups __sA5_FMA(%rax), %zmm0
+ vfnmadd231ps %zmm15, %zmm10, %zmm13
+ vmulps %zmm9, %zmm9, %zmm8
+ vfnmadd213ps %zmm13, %zmm12, %zmm15
+ vmovups __sA9_FMA(%rax), %zmm12
+ vxorps %zmm1, %zmm15, %zmm1
+ vmulps %zmm1, %zmm1, %zmm13
+
+/* 2) Polynomial (minimax for sin within [-Pi/4; +Pi/4] interval)
+ a) Calculate X^2 = X * X
+ b) Calculate 2 polynomials for sin and cos:
+ RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
+ RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4)))) */
+ vmovaps %zmm12, %zmm7
+ vfmadd213ps %zmm3, %zmm8, %zmm7
+ vfmadd213ps %zmm3, %zmm13, %zmm12
+ vfmadd213ps %zmm0, %zmm8, %zmm7
+ vfmadd213ps %zmm0, %zmm13, %zmm12
+ vfmadd213ps %zmm6, %zmm8, %zmm7
+ vfmadd213ps %zmm6, %zmm13, %zmm12
+ vmulps %zmm8, %zmm7, %zmm10
+ vmulps %zmm13, %zmm12, %zmm3
+ vfmadd213ps %zmm9, %zmm9, %zmm10
+ vfmadd213ps %zmm1, %zmm1, %zmm3
+ vxorps %zmm11, %zmm10, %zmm0
+ vpandnd %zmm2, %zmm2, %zmm14{%k1}
+ vptestmd %zmm14, %zmm14, %k0
+ kmovw %k0, %ecx
+ testl %ecx, %ecx
+ jne .LBL_2_3
+
+.LBL_2_2:
+ cfi_remember_state
+ vmovups %zmm0, (%rdi)
+ vmovups %zmm3, (%rsi)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+
+.LBL_2_3:
+ cfi_restore_state
+ vmovups %zmm4, 1152(%rsp)
+ vmovups %zmm0, 1216(%rsp)
+ vmovups %zmm3, 1280(%rsp)
+ je .LBL_2_2
+
+ xorb %dl, %dl
+ xorl %eax, %eax
+ kmovw %k4, 1048(%rsp)
+ kmovw %k5, 1040(%rsp)
+ kmovw %k6, 1032(%rsp)
+ kmovw %k7, 1024(%rsp)
+ vmovups %zmm16, 960(%rsp)
+ vmovups %zmm17, 896(%rsp)
+ vmovups %zmm18, 832(%rsp)
+ vmovups %zmm19, 768(%rsp)
+ vmovups %zmm20, 704(%rsp)
+ vmovups %zmm21, 640(%rsp)
+ vmovups %zmm22, 576(%rsp)
+ vmovups %zmm23, 512(%rsp)
+ vmovups %zmm24, 448(%rsp)
+ vmovups %zmm25, 384(%rsp)
+ vmovups %zmm26, 320(%rsp)
+ vmovups %zmm27, 256(%rsp)
+ vmovups %zmm28, 192(%rsp)
+ vmovups %zmm29, 128(%rsp)
+ vmovups %zmm30, 64(%rsp)
+ vmovups %zmm31, (%rsp)
+ movq %rsi, 1056(%rsp)
+ movq %r12, 1096(%rsp)
+ cfi_offset_rel_rsp (12, 1096)
+ movb %dl, %r12b
+ movq %r13, 1088(%rsp)
+ cfi_offset_rel_rsp (13, 1088)
+ movl %eax, %r13d
+ movq %r14, 1080(%rsp)
+ cfi_offset_rel_rsp (14, 1080)
+ movl %ecx, %r14d
+ movq %r15, 1072(%rsp)
+ cfi_offset_rel_rsp (15, 1072)
+ movq %rbx, 1064(%rsp)
+ movq %rdi, %rbx
+ cfi_remember_state
+
+.LBL_2_6:
+ btl %r13d, %r14d
+ jc .LBL_2_13
+
+.LBL_2_7:
+ lea 1(%r13), %esi
+ btl %esi, %r14d
+ jc .LBL_2_10
+
+.LBL_2_8:
+ incb %r12b
+ addl $2, %r13d
+ cmpb $16, %r12b
+ jb .LBL_2_6
+
+ kmovw 1048(%rsp), %k4
+ movq %rbx, %rdi
+ kmovw 1040(%rsp), %k5
+ kmovw 1032(%rsp), %k6
+ kmovw 1024(%rsp), %k7
+ vmovups 960(%rsp), %zmm16
+ vmovups 896(%rsp), %zmm17
+ vmovups 832(%rsp), %zmm18
+ vmovups 768(%rsp), %zmm19
+ vmovups 704(%rsp), %zmm20
+ vmovups 640(%rsp), %zmm21
+ vmovups 576(%rsp), %zmm22
+ vmovups 512(%rsp), %zmm23
+ vmovups 448(%rsp), %zmm24
+ vmovups 384(%rsp), %zmm25
+ vmovups 320(%rsp), %zmm26
+ vmovups 256(%rsp), %zmm27
+ vmovups 192(%rsp), %zmm28
+ vmovups 128(%rsp), %zmm29
+ vmovups 64(%rsp), %zmm30
+ vmovups (%rsp), %zmm31
+ vmovups 1216(%rsp), %zmm0
+ vmovups 1280(%rsp), %zmm3
+ movq 1056(%rsp), %rsi
+ movq 1096(%rsp), %r12
+ cfi_restore (%r12)
+ movq 1088(%rsp), %r13
+ cfi_restore (%r13)
+ movq 1080(%rsp), %r14
+ cfi_restore (%r14)
+ movq 1072(%rsp), %r15
+ cfi_restore (%r15)
+ movq 1064(%rsp), %rbx
+ jmp .LBL_2_2
+
+.LBL_2_10:
+ cfi_restore_state
+ movzbl %r12b, %r15d
+ vmovss 1156(%rsp,%r15,8), %xmm0
+ vzeroupper
+ vmovss 1156(%rsp,%r15,8), %xmm0
+
+ call sinf@PLT
+
+ vmovss %xmm0, 1220(%rsp,%r15,8)
+ vmovss 1156(%rsp,%r15,8), %xmm0
+
+ call cosf@PLT
+
+ vmovss %xmm0, 1284(%rsp,%r15,8)
+ jmp .LBL_2_8
+
+.LBL_2_13:
+ movzbl %r12b, %r15d
+ vmovss 1152(%rsp,%r15,8), %xmm0
+ vzeroupper
+ vmovss 1152(%rsp,%r15,8), %xmm0
+
+ call sinf@PLT
+
+ vmovss %xmm0, 1216(%rsp,%r15,8)
+ vmovss 1152(%rsp,%r15,8), %xmm0
+
+ call cosf@PLT
+
+ vmovss %xmm0, 1280(%rsp,%r15,8)
+ jmp .LBL_2_7
+#endif
+END (_ZGVeN16vvv_sincosf_skx)
+
+ .section .rodata, "a"
+.L_2il0floatpacket.13:
+ .long 0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff
+ .type .L_2il0floatpacket.13,@object
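As a rough guide to what the kernel above computes per lane, here is a scalar C sketch of the same structure: quadrant-based range reduction to [-Pi/4, +Pi/4], two short polynomials, and sign/swap decided by the quadrant bits. It is illustrative only; the reduction is done in double for brevity instead of the split __sPI1..__sPI3 subtraction, plain Taylor coefficients stand in for the tuned __sA* table constants, and there is no large-argument slow path.

#define _GNU_SOURCE
#include <math.h>

static void
sincosf_sketch (float x, float *sinp, float *cosp)
{
  /* Quadrant index n = round (x * 2/pi); the real kernels only treat
     |x| below __sRangeReductionVal this way and branch out otherwise.  */
  float y = nearbyintf (x * (float) M_2_PI);
  int n = (int) y;

  /* Reduced argument r = x - n*pi/2, computed in double here for
     brevity; the assembly subtracts pi/2 split into several float
     parts instead.  */
  float r = (float) ((double) x - (double) y * M_PI_2);
  float r2 = r * r;

  /* Two short polynomials on [-pi/4, pi/4]; Taylor coefficients stand
     in for the tuned table constants.  */
  float ps = r + r * r2 * (-1.0f / 6 + r2 * (1.0f / 120 + r2 * (-1.0f / 5040)));
  float pc = 1.0f + r2 * (-0.5f + r2 * (1.0f / 24 + r2 * (-1.0f / 720)));

  /* Quadrant bits decide the sin/cos swap and the final signs.  */
  float s = (n & 1) ? pc : ps;
  float c = (n & 1) ? ps : pc;
  if (n & 2)
    s = -s;
  if ((n + 1) & 2)
    c = -c;
  *sinp = s;
  *cosp = c;
}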
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core.S
new file mode 100644
index 0000000000..610046b587
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core.S
@@ -0,0 +1,38 @@
+/* Multiple versions of vectorized sincosf.
+ Copyright (C) 2014-2015 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <init-arch.h>
+
+ .text
+ENTRY (_ZGVbN4vvv_sincosf)
+ .type _ZGVbN4vvv_sincosf, @gnu_indirect_function
+ cmpl $0, KIND_OFFSET+__cpu_features(%rip)
+ jne 1f
+ call __init_cpu_features
+1: leaq _ZGVbN4vvv_sincosf_sse4(%rip), %rax
+ testl $bit_SSE4_1, __cpu_features+CPUID_OFFSET+index_SSE4_1(%rip)
+ jz 2f
+ ret
+2: leaq _ZGVbN4vvv_sincosf_sse2(%rip), %rax
+ ret
+END (_ZGVbN4vvv_sincosf)
+libmvec_hidden_def (_ZGVbN4vvv_sincosf)
+
+#define _ZGVbN4vvv_sincosf _ZGVbN4vvv_sincosf_sse2
+#include "../svml_s_sincosf4_core.S"
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S
new file mode 100644
index 0000000000..8c51e44988
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S
@@ -0,0 +1,268 @@
+/* Function sincosf vectorized with SSE4.
+ Copyright (C) 2014-2015 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include "svml_s_sincosf_data.h"
+
+ .text
+ENTRY (_ZGVbN4vvv_sincosf_sse4)
+/*
+ ALGORITHM DESCRIPTION:
+
+ 1) Range reduction to [-Pi/4; +Pi/4] interval
+ a) Grab sign from source argument and save it.
+ b) Remove sign using AND operation
+ c) Getting octant Y by 2/Pi multiplication
+ d) Add "Right Shifter" value
+ e) Treat obtained value as integer S for destination sign setting.
+ SS = ((S-S&1)&2)<<30; For sin part
+ SC = ((S+S&1)&2)<<30; For cos part
+ f) Change destination sign if source sign is negative
+ using XOR operation.
+ g) Subtract "Right Shifter" (0x4B000000) value
+ h) Subtract Y*(PI/2) from X argument, where PI/2 divided to 4 parts:
+ X = X - Y*PI1 - Y*PI2 - Y*PI3 - Y*PI4;
+ 2) Polynomial (minimax for sin within [-Pi/4; +Pi/4] interval)
+ a) Calculate X^2 = X * X
+ b) Calculate 2 polynomials for sin and cos:
+ RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
+ RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4))));
+ c) Swap RS & RC if the first bit of the obtained value after
+ Right Shifting is set to 1. Using And, Andnot & Or operations.
+ 3) Destination sign setting
+ a) Set shifted destination sign using XOR operation:
+ R1 = XOR( RS, SS );
+ R2 = XOR( RC, SC ). */
+
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $320, %rsp
+ movq __svml_ssincos_data@GOTPCREL(%rip), %rax
+ movups %xmm12, 176(%rsp)
+ movups %xmm9, 160(%rsp)
+ movups __sAbsMask(%rax), %xmm12
+
+/* Absolute argument computation */
+ movaps %xmm12, %xmm5
+ andnps %xmm0, %xmm12
+ movups __sInvPI(%rax), %xmm7
+ andps %xmm0, %xmm5
+
+/* c) Getting octant Y by 2/Pi multiplication
+ d) Add "Right Shifter" value. */
+ mulps %xmm5, %xmm7
+ movups %xmm10, 144(%rsp)
+ movups __sPI1(%rax), %xmm10
+
+/* h) Subtract Y*(PI/2) from X argument, where PI/2 divided to 3 parts:
+ X = X - Y*PI1 - Y*PI2 - Y*PI3. */
+ movaps %xmm10, %xmm1
+ addps __sRShifter(%rax), %xmm7
+
+/* e) Treat obtained value as integer S for destination sign setting */
+ movaps %xmm7, %xmm9
+
+/* g) Subtract "Right Shifter" (0x4B000000) value */
+ subps __sRShifter(%rax), %xmm7
+ mulps %xmm7, %xmm1
+ pslld $31, %xmm9
+ movups __sPI2(%rax), %xmm6
+ movups %xmm13, 112(%rsp)
+ movaps %xmm5, %xmm13
+ movaps %xmm6, %xmm2
+ subps %xmm1, %xmm13
+ mulps %xmm7, %xmm2
+ movups __sSignMask(%rax), %xmm3
+ movaps %xmm5, %xmm1
+ movups __sOneHalf(%rax), %xmm4
+ subps %xmm2, %xmm13
+ cmpnleps __sRangeReductionVal(%rax), %xmm5
+ movaps %xmm3, %xmm2
+ andps %xmm13, %xmm2
+ xorps %xmm2, %xmm4
+
+/* Result sign calculations */
+ xorps %xmm2, %xmm3
+ xorps %xmm9, %xmm3
+
+/* Add correction term 0.5 for cos() part */
+ addps %xmm7, %xmm4
+ movmskps %xmm5, %ecx
+ mulps %xmm4, %xmm10
+ mulps %xmm4, %xmm6
+ subps %xmm10, %xmm1
+ movups __sPI3(%rax), %xmm10
+ subps %xmm6, %xmm1
+ movaps %xmm10, %xmm6
+ mulps %xmm7, %xmm6
+ mulps %xmm4, %xmm10
+ subps %xmm6, %xmm13
+ subps %xmm10, %xmm1
+ movups __sPI4(%rax), %xmm6
+ mulps %xmm6, %xmm7
+ mulps %xmm6, %xmm4
+ subps %xmm7, %xmm13
+ subps %xmm4, %xmm1
+ xorps %xmm9, %xmm13
+ xorps %xmm3, %xmm1
+ movaps %xmm13, %xmm4
+ movaps %xmm1, %xmm2
+ mulps %xmm13, %xmm4
+ mulps %xmm1, %xmm2
+ movups __sA9(%rax), %xmm7
+
+/* 2) Polynomial (minimax for sin within [-Pi/4; +Pi/4] interval)
+ a) Calculate X^2 = X * X
+ b) Calculate 2 polynomials for sin and cos:
+ RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
+ RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4)))) */
+ movaps %xmm7, %xmm3
+ mulps %xmm4, %xmm3
+ mulps %xmm2, %xmm7
+ addps __sA7(%rax), %xmm3
+ addps __sA7(%rax), %xmm7
+ mulps %xmm4, %xmm3
+ mulps %xmm2, %xmm7
+ addps __sA5(%rax), %xmm3
+ addps __sA5(%rax), %xmm7
+ mulps %xmm4, %xmm3
+ mulps %xmm2, %xmm7
+ addps __sA3(%rax), %xmm3
+ addps __sA3(%rax), %xmm7
+ mulps %xmm3, %xmm4
+ mulps %xmm7, %xmm2
+ mulps %xmm13, %xmm4
+ mulps %xmm1, %xmm2
+ addps %xmm4, %xmm13
+ addps %xmm2, %xmm1
+ xorps %xmm12, %xmm13
+ testl %ecx, %ecx
+ jne .LBL_1_3
+
+.LBL_1_2:
+ cfi_remember_state
+ movups 160(%rsp), %xmm9
+ movaps %xmm13, (%rdi)
+ movups 144(%rsp), %xmm10
+ movups 176(%rsp), %xmm12
+ movups 112(%rsp), %xmm13
+ movups %xmm1, (%rsi)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+
+.LBL_1_3:
+ cfi_restore_state
+ movups %xmm0, 128(%rsp)
+ movups %xmm13, 192(%rsp)
+ movups %xmm1, 256(%rsp)
+ je .LBL_1_2
+
+ xorb %dl, %dl
+ xorl %eax, %eax
+ movups %xmm8, 48(%rsp)
+ movups %xmm11, 32(%rsp)
+ movups %xmm14, 16(%rsp)
+ movups %xmm15, (%rsp)
+ movq %rsi, 64(%rsp)
+ movq %r12, 104(%rsp)
+ cfi_offset_rel_rsp (12, 104)
+ movb %dl, %r12b
+ movq %r13, 96(%rsp)
+ cfi_offset_rel_rsp (13, 96)
+ movl %eax, %r13d
+ movq %r14, 88(%rsp)
+ cfi_offset_rel_rsp (14, 88)
+ movl %ecx, %r14d
+ movq %r15, 80(%rsp)
+ cfi_offset_rel_rsp (15, 80)
+ movq %rbx, 72(%rsp)
+ movq %rdi, %rbx
+ cfi_remember_state
+
+.LBL_1_6:
+ btl %r13d, %r14d
+ jc .LBL_1_13
+
+.LBL_1_7:
+ lea 1(%r13), %esi
+ btl %esi, %r14d
+ jc .LBL_1_10
+
+.LBL_1_8:
+ incb %r12b
+ addl $2, %r13d
+ cmpb $16, %r12b
+ jb .LBL_1_6
+
+ movups 48(%rsp), %xmm8
+ movq %rbx, %rdi
+ movups 32(%rsp), %xmm11
+ movups 16(%rsp), %xmm14
+ movups (%rsp), %xmm15
+ movq 64(%rsp), %rsi
+ movq 104(%rsp), %r12
+ cfi_restore (%r12)
+ movq 96(%rsp), %r13
+ cfi_restore (%r13)
+ movq 88(%rsp), %r14
+ cfi_restore (%r14)
+ movq 80(%rsp), %r15
+ cfi_restore (%r15)
+ movq 72(%rsp), %rbx
+ movups 192(%rsp), %xmm13
+ movups 256(%rsp), %xmm1
+ jmp .LBL_1_2
+
+.LBL_1_10:
+ cfi_restore_state
+ movzbl %r12b, %r15d
+ movss 132(%rsp,%r15,8), %xmm0
+
+ call sinf@PLT
+
+ movss %xmm0, 196(%rsp,%r15,8)
+ movss 132(%rsp,%r15,8), %xmm0
+
+ call cosf@PLT
+
+ movss %xmm0, 260(%rsp,%r15,8)
+ jmp .LBL_1_8
+
+.LBL_1_13:
+ movzbl %r12b, %r15d
+ movss 128(%rsp,%r15,8), %xmm0
+
+ call sinf@PLT
+
+ movss %xmm0, 192(%rsp,%r15,8)
+ movss 128(%rsp,%r15,8), %xmm0
+
+ call cosf@PLT
+
+ movss %xmm0, 256(%rsp,%r15,8)
+ jmp .LBL_1_7
+
+END (_ZGVbN4vvv_sincosf_sse4)
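A side note on the "Right Shifter" constant (0x4B000000) used in all of these kernels: adding 2^23 to a non-negative float below 2^23 forces it to an integer under round-to-nearest and leaves that integer in the low mantissa bits, so the quadrant index can be read, and its low bit moved into the sign position by the pslld $31, without a separate float-to-int conversion. A hedged scalar illustration (the function name is mine, not from the patch):

#include <stdint.h>
#include <string.h>

/* Returns the bit pattern of y_scaled + 2^23; for 0 <= y_scaled < 2^23
   and round-to-nearest, the low 23 bits hold round (y_scaled).  */
static uint32_t
quadrant_bits (float y_scaled)          /* y_scaled = |x| * 2/pi */
{
  const float rshifter = 0x1p23f;       /* 0x4B000000 */
  float t = y_scaled + rshifter;
  uint32_t bits;
  memcpy (&bits, &t, sizeof bits);
  return bits;
}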
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core.S
new file mode 100644
index 0000000000..9e5be67fc9
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core.S
@@ -0,0 +1,38 @@
+/* Multiple versions of vectorized sincosf.
+ Copyright (C) 2014-2015 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <init-arch.h>
+
+ .text
+ENTRY (_ZGVdN8vvv_sincosf)
+ .type _ZGVdN8vvv_sincosf, @gnu_indirect_function
+ cmpl $0, KIND_OFFSET+__cpu_features(%rip)
+ jne 1f
+ call __init_cpu_features
+1: leaq _ZGVdN8vvv_sincosf_avx2(%rip), %rax
+ testl $bit_AVX2_Usable, __cpu_features+FEATURE_OFFSET+index_AVX2_Usable(%rip)
+ jz 2f
+ ret
+2: leaq _ZGVdN8vvv_sincosf_sse_wrapper(%rip), %rax
+ ret
+END (_ZGVdN8vvv_sincosf)
+libmvec_hidden_def (_ZGVdN8vvv_sincosf)
+
+#define _ZGVdN8vvv_sincosf _ZGVdN8vvv_sincosf_sse_wrapper
+#include "../svml_s_sincosf8_core.S"
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S
new file mode 100644
index 0000000000..153c315799
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S
@@ -0,0 +1,241 @@
+/* Function sincosf vectorized with AVX2.
+ Copyright (C) 2014-2015 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include "svml_s_sincosf_data.h"
+
+ .text
+ENTRY(_ZGVdN8vvv_sincosf_avx2)
+/*
+ ALGORITHM DESCRIPTION:
+
+ 1) Range reduction to [-Pi/4; +Pi/4] interval
+ a) Grab sign from source argument and save it.
+ b) Remove sign using AND operation
+ c) Getting octant Y by 2/Pi multiplication
+ d) Add "Right Shifter" value
+ e) Treat obtained value as integer S for destination sign setting.
+ SS = ((S-S&1)&2)<<30; For sin part
+ SC = ((S+S&1)&2)<<30; For cos part
+ f) Change destination sign if source sign is negative
+ using XOR operation.
+ g) Subtract "Right Shifter" (0x4B000000) value
+ h) Subtract Y*(PI/2) from X argument, where PI/2 divided to 4 parts:
+ X = X - Y*PI1 - Y*PI2 - Y*PI3 - Y*PI4;
+ 2) Polynomial (minimax for sin within [-Pi/4; +Pi/4] interval)
+ a) Calculate X^2 = X * X
+ b) Calculate 2 polynomials for sin and cos:
+ RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
+ RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4))));
+ c) Swap RS & RC if the first bit of the obtained value after
+ Right Shifting is set to 1. Using And, Andnot & Or operations.
+ 3) Destination sign setting
+ a) Set shifted destination sign using XOR operation:
+ R1 = XOR( RS, SS );
+ R2 = XOR( RC, SC ). */
+
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $448, %rsp
+ movq __svml_ssincos_data@GOTPCREL(%rip), %rax
+ vmovdqa %ymm0, %ymm5
+ vmovups %ymm13, 352(%rsp)
+ vmovups __sAbsMask(%rax), %ymm2
+ vmovups __sInvPI(%rax), %ymm1
+ vmovups __sPI1_FMA(%rax), %ymm13
+ vmovups %ymm15, 288(%rsp)
+
+/* Absolute argument computation */
+ vandps %ymm2, %ymm5, %ymm4
+
+/* c) Getting octant Y by 2/Pi multiplication
+ d) Add "Right Shifter" value */
+ vfmadd213ps __sRShifter(%rax), %ymm4, %ymm1
+
+/* e) Treat obtained value as integer S for destination sign setting */
+ vpslld $31, %ymm1, %ymm0
+
+/* g) Subtract "Right Shifter" (0x4B000000) value */
+ vsubps __sRShifter(%rax), %ymm1, %ymm1
+
+/* h) Subtract Y*(PI/2) from X argument, where PI/2 divided to 3 parts:
+ X = X - Y*PI1 - Y*PI2 - Y*PI3 */
+ vmovdqa %ymm4, %ymm7
+ vfnmadd231ps %ymm1, %ymm13, %ymm7
+ vfnmadd231ps __sPI2_FMA(%rax), %ymm1, %ymm7
+ vandps __sSignMask(%rax), %ymm7, %ymm15
+ vxorps __sOneHalf(%rax), %ymm15, %ymm6
+
+/* Add correction term 0.5 for cos() part */
+ vaddps %ymm6, %ymm1, %ymm6
+ vmovdqa %ymm4, %ymm3
+ vfnmadd231ps %ymm6, %ymm13, %ymm3
+ vmovups __sPI3_FMA(%rax), %ymm13
+ vcmpnle_uqps __sRangeReductionVal(%rax), %ymm4, %ymm4
+ vfnmadd231ps __sPI2_FMA(%rax), %ymm6, %ymm3
+ vfnmadd213ps %ymm7, %ymm13, %ymm1
+ vfnmadd213ps %ymm3, %ymm13, %ymm6
+
+/* Result sign calculations */
+ vxorps __sSignMask(%rax), %ymm15, %ymm3
+ vxorps %ymm0, %ymm3, %ymm7
+ vxorps %ymm7, %ymm6, %ymm3
+ vxorps %ymm0, %ymm1, %ymm15
+ vandnps %ymm5, %ymm2, %ymm6
+ vmovups __sA7_FMA(%rax), %ymm2
+ vmulps %ymm15, %ymm15, %ymm13
+ vmovups __sA9_FMA(%rax), %ymm7
+ vmulps %ymm3, %ymm3, %ymm1
+
+/* 2) Polynomial (minimax for sin within [-Pi/4; +Pi/4] interval)
+ a) Calculate X^2 = X * X
+ b) Calculate 2 polynomials for sin and cos:
+ RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
+ RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4)))) */
+ vmovdqa %ymm2, %ymm0
+ vfmadd231ps __sA9_FMA(%rax), %ymm13, %ymm0
+ vfmadd213ps %ymm2, %ymm1, %ymm7
+ vfmadd213ps __sA5_FMA(%rax), %ymm13, %ymm0
+ vfmadd213ps __sA5_FMA(%rax), %ymm1, %ymm7
+ vfmadd213ps __sA3(%rax), %ymm13, %ymm0
+ vfmadd213ps __sA3(%rax), %ymm1, %ymm7
+ vmulps %ymm13, %ymm0, %ymm13
+ vmulps %ymm1, %ymm7, %ymm1
+ vfmadd213ps %ymm15, %ymm15, %ymm13
+ vfmadd213ps %ymm3, %ymm3, %ymm1
+ vmovmskps %ymm4, %ecx
+ vxorps %ymm6, %ymm13, %ymm0
+ testl %ecx, %ecx
+ jne .LBL_1_3
+
+.LBL_1_2:
+ cfi_remember_state
+ vmovups 352(%rsp), %ymm13
+ vmovups 288(%rsp), %ymm15
+ vmovups %ymm0, (%rdi)
+ vmovups %ymm1, (%rsi)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+
+.LBL_1_3:
+ cfi_restore_state
+ vmovups %ymm5, 256(%rsp)
+ vmovups %ymm0, 320(%rsp)
+ vmovups %ymm1, 384(%rsp)
+ je .LBL_1_2
+
+ xorb %dl, %dl
+ xorl %eax, %eax
+ vmovups %ymm8, 160(%rsp)
+ vmovups %ymm9, 128(%rsp)
+ vmovups %ymm10, 96(%rsp)
+ vmovups %ymm11, 64(%rsp)
+ vmovups %ymm12, 32(%rsp)
+ vmovups %ymm14, (%rsp)
+ movq %rsi, 192(%rsp)
+ movq %r12, 232(%rsp)
+ cfi_offset_rel_rsp (12, 232)
+ movb %dl, %r12b
+ movq %r13, 224(%rsp)
+ cfi_offset_rel_rsp (13, 224)
+ movl %eax, %r13d
+ movq %r14, 216(%rsp)
+ cfi_offset_rel_rsp (14, 216)
+ movl %ecx, %r14d
+ movq %r15, 208(%rsp)
+ cfi_offset_rel_rsp (15, 208)
+ movq %rbx, 200(%rsp)
+ movq %rdi, %rbx
+ cfi_remember_state
+
+.LBL_1_6:
+ btl %r13d, %r14d
+ jc .LBL_1_13
+
+.LBL_1_7:
+ lea 1(%r13), %esi
+ btl %esi, %r14d
+ jc .LBL_1_10
+
+.LBL_1_8:
+ incb %r12b
+ addl $2, %r13d
+ cmpb $16, %r12b
+ jb .LBL_1_6
+
+ vmovups 160(%rsp), %ymm8
+ movq %rbx, %rdi
+ vmovups 128(%rsp), %ymm9
+ vmovups 96(%rsp), %ymm10
+ vmovups 64(%rsp), %ymm11
+ vmovups 32(%rsp), %ymm12
+ vmovups (%rsp), %ymm14
+ vmovups 320(%rsp), %ymm0
+ vmovups 384(%rsp), %ymm1
+ movq 192(%rsp), %rsi
+ movq 232(%rsp), %r12
+ cfi_restore (%r12)
+ movq 224(%rsp), %r13
+ cfi_restore (%r13)
+ movq 216(%rsp), %r14
+ cfi_restore (%r14)
+ movq 208(%rsp), %r15
+ cfi_restore (%r15)
+ movq 200(%rsp), %rbx
+ jmp .LBL_1_2
+
+.LBL_1_10:
+ cfi_restore_state
+ movzbl %r12b, %r15d
+ vmovss 260(%rsp,%r15,8), %xmm0
+ vzeroupper
+
+ call sinf@PLT
+
+ vmovss %xmm0, 324(%rsp,%r15,8)
+ vmovss 260(%rsp,%r15,8), %xmm0
+
+ call cosf@PLT
+
+ vmovss %xmm0, 388(%rsp,%r15,8)
+ jmp .LBL_1_8
+
+.LBL_1_13:
+ movzbl %r12b, %r15d
+ vmovss 256(%rsp,%r15,8), %xmm0
+ vzeroupper
+
+ call sinf@PLT
+
+ vmovss %xmm0, 320(%rsp,%r15,8)
+ vmovss 256(%rsp,%r15,8), %xmm0
+
+ call cosf@PLT
+
+ vmovss %xmm0, 384(%rsp,%r15,8)
+ jmp .LBL_1_7
+
+END(_ZGVdN8vvv_sincosf_avx2)
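Finally, the .LBL_*_3 / .LBL_*_10 / .LBL_*_13 blocks in each kernel implement the special-case fallback: when the range-reduction compare flags any lane, the vector inputs and preliminary results are spilled to the stack and scalar sinf/cosf are called for just the flagged lanes. A hedged scalar rendering of that pattern (names are illustrative):

#define _GNU_SOURCE
#include <math.h>

/* 'flags' has one bit per lane set by the out-of-range compare; only
   those lanes are recomputed with the scalar routines, everything else
   keeps the fast-path result already stored in s[] and c[].  */
static void
fixup_special_lanes (int nlanes, unsigned int flags,
                     const float *x, float *s, float *c)
{
  for (int i = 0; i < nlanes; i++)
    if (flags & (1u << i))
      {
        s[i] = sinf (x[i]);
        c[i] = cosf (x[i]);
      }
}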