-rw-r--r--	sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S	334
1 file changed, 166 insertions, 168 deletions
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S
index ba91554c7f..047834d52e 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S
@@ -22,7 +22,7 @@
  *   exp2(x)  = 2^n * T[j] * (1 + P(y))
  *   where
  *        x = m*(1/K) + y,    y in [-1/K..1/K]
- *        m = n*K + j,        m,n,j - signed integer, j in [-K/2..K/2]
+ *        m = n*K + j,        m, n,j - signed integer, j in [-K/2..K/2]
  *
  *   values of 2^j/K are tabulated
  *
@@ -43,196 +43,194 @@
 /* Offsets for data table __svml_sexp2_data_internal */
-#define _sShifter                     0
-#define _sPC0                         16
-#define _sPC1                         32
-#define _sPC2                         48
-#define _sPC3                         64
-#define _sPC4                         80
-#define _sPC5                         96
-#define _sPC6                         112
-#define _iAbsMask                     128
-#define _iDomainRange                 144
+#define _sShifter	0
+#define _sPC0	16
+#define _sPC1	32
+#define _sPC2	48
+#define _sPC3	64
+#define _sPC4	80
+#define _sPC5	96
+#define _sPC6	112
+#define _iAbsMask	128
+#define _iDomainRange	144
 
 #include <sysdep.h>
 
-	.text
-	.section .text.sse4,"ax",@progbits
+	.section .text.sse4, "ax", @progbits
 ENTRY(_ZGVbN4v_exp2f_sse4)
-	subq      $72, %rsp
-	cfi_def_cfa_offset(80)
-
-/* Check for overflow\underflow */
-	movups    __svml_sexp2_data_internal(%rip), %xmm1
-
-/* Implementation */
-	movaps    %xmm1, %xmm5
-
-/* Polynomial */
-	movups    _sPC6+__svml_sexp2_data_internal(%rip), %xmm4
-	addps     %xmm0, %xmm5
-	movaps    %xmm5, %xmm3
-
-/* 2^N */
-	pslld     $23, %xmm5
-
-/* Check for overflow\underflow */
-	movdqu    _iAbsMask+__svml_sexp2_data_internal(%rip), %xmm2
-	subps     %xmm1, %xmm3
-
-/* R */
-	movaps    %xmm0, %xmm1
-	pand      %xmm0, %xmm2
-	pcmpgtd   _iDomainRange+__svml_sexp2_data_internal(%rip), %xmm2
-	subps     %xmm3, %xmm1
-	movmskps  %xmm2, %edx
-	mulps     %xmm1, %xmm4
-	addps     _sPC5+__svml_sexp2_data_internal(%rip), %xmm4
-	mulps     %xmm1, %xmm4
-	addps     _sPC4+__svml_sexp2_data_internal(%rip), %xmm4
-	mulps     %xmm1, %xmm4
-	addps     _sPC3+__svml_sexp2_data_internal(%rip), %xmm4
-	mulps     %xmm1, %xmm4
-	addps     _sPC2+__svml_sexp2_data_internal(%rip), %xmm4
-	mulps     %xmm1, %xmm4
-	addps     _sPC1+__svml_sexp2_data_internal(%rip), %xmm4
-	mulps     %xmm4, %xmm1
-	addps     _sPC0+__svml_sexp2_data_internal(%rip), %xmm1
-
-/* Reconstruction */
-	paddd     %xmm5, %xmm1
-	testl     %edx, %edx
-
-/* Go to special inputs processing branch */
-	jne       L(SPECIAL_VALUES_BRANCH)
-	# LOE rbx rbp r12 r13 r14 r15 edx xmm0 xmm1
-
-/* Restore registers
- * and exit the function
- */
+	subq	$72, %rsp
+	cfi_def_cfa_offset(80)
+
+	/* Check for overflow\underflow */
+	movups	__svml_sexp2_data_internal(%rip), %xmm1
+
+	/* Implementation */
+	movaps	%xmm1, %xmm5
+
+	/* Polynomial */
+	movups	_sPC6+__svml_sexp2_data_internal(%rip), %xmm4
+	addps	%xmm0, %xmm5
+	movaps	%xmm5, %xmm3
+
+	/* 2^N */
+	pslld	$23, %xmm5
+
+	/* Check for overflow\underflow */
+	movdqu	_iAbsMask+__svml_sexp2_data_internal(%rip), %xmm2
+	subps	%xmm1, %xmm3
+
+	/* R */
+	movaps	%xmm0, %xmm1
+	pand	%xmm0, %xmm2
+	pcmpgtd	_iDomainRange+__svml_sexp2_data_internal(%rip), %xmm2
+	subps	%xmm3, %xmm1
+	movmskps %xmm2, %edx
+	mulps	%xmm1, %xmm4
+	addps	_sPC5+__svml_sexp2_data_internal(%rip), %xmm4
+	mulps	%xmm1, %xmm4
+	addps	_sPC4+__svml_sexp2_data_internal(%rip), %xmm4
+	mulps	%xmm1, %xmm4
+	addps	_sPC3+__svml_sexp2_data_internal(%rip), %xmm4
+	mulps	%xmm1, %xmm4
+	addps	_sPC2+__svml_sexp2_data_internal(%rip), %xmm4
+	mulps	%xmm1, %xmm4
+	addps	_sPC1+__svml_sexp2_data_internal(%rip), %xmm4
+	mulps	%xmm4, %xmm1
+	addps	_sPC0+__svml_sexp2_data_internal(%rip), %xmm1
+
+	/* Reconstruction */
+	paddd	%xmm5, %xmm1
+	testl	%edx, %edx
+
+	/* Go to special inputs processing branch */
+	jne	L(SPECIAL_VALUES_BRANCH)
+	# LOE rbx rbp r12 r13 r14 r15 edx xmm0 xmm1
+
+	/* Restore registers
+	 * and exit the function
+	 */
 L(EXIT):
-	movaps    %xmm1, %xmm0
-	addq      $72, %rsp
-	cfi_def_cfa_offset(8)
-	ret
-	cfi_def_cfa_offset(80)
-
-/* Branch to process
- * special inputs
- */
+	movaps	%xmm1, %xmm0
+	addq	$72, %rsp
+	cfi_def_cfa_offset(8)
+	ret
+	cfi_def_cfa_offset(80)
+
+	/* Branch to process
+	 * special inputs
+	 */
 L(SPECIAL_VALUES_BRANCH):
-	movups    %xmm0, 32(%rsp)
-	movups    %xmm1, 48(%rsp)
-	# LOE rbx rbp r12 r13 r14 r15 edx
-
-	xorl      %eax, %eax
-	movq      %r12, 16(%rsp)
-	cfi_offset(12, -64)
-	movl      %eax, %r12d
-	movq      %r13, 8(%rsp)
-	cfi_offset(13, -72)
-	movl      %edx, %r13d
-	movq      %r14, (%rsp)
-	cfi_offset(14, -80)
-	# LOE rbx rbp r15 r12d r13d
-
-/* Range mask
- * bits check
- */
+	movups	%xmm0, 32(%rsp)
+	movups	%xmm1, 48(%rsp)
+	# LOE rbx rbp r12 r13 r14 r15 edx
+
+	xorl	%eax, %eax
+	movq	%r12, 16(%rsp)
+	cfi_offset(12, -64)
+	movl	%eax, %r12d
+	movq	%r13, 8(%rsp)
+	cfi_offset(13, -72)
+	movl	%edx, %r13d
+	movq	%r14, (%rsp)
+	cfi_offset(14, -80)
+	# LOE rbx rbp r15 r12d r13d
+
+	/* Range mask
+	 * bits check
+	 */
 L(RANGEMASK_CHECK):
-	btl       %r12d, %r13d
-
-/* Call scalar math function */
-	jc        L(SCALAR_MATH_CALL)
-	# LOE rbx rbp r15 r12d r13d
-
-/* Special inputs
- * processing loop
- */
+	btl	%r12d, %r13d
+
+	/* Call scalar math function */
+	jc	L(SCALAR_MATH_CALL)
+	# LOE rbx rbp r15 r12d r13d
+
+	/* Special inputs
+	 * processing loop
+	 */
 L(SPECIAL_VALUES_LOOP):
-	incl      %r12d
-	cmpl      $4, %r12d
-
-/* Check bits in range mask */
-	jl        L(RANGEMASK_CHECK)
-	# LOE rbx rbp r15 r12d r13d
-
-	movq      16(%rsp), %r12
-	cfi_restore(12)
-	movq      8(%rsp), %r13
-	cfi_restore(13)
-	movq      (%rsp), %r14
-	cfi_restore(14)
-	movups    48(%rsp), %xmm1
-
-/* Go to exit */
-	jmp       L(EXIT)
-	cfi_offset(12, -64)
-	cfi_offset(13, -72)
-	cfi_offset(14, -80)
-	# LOE rbx rbp r12 r13 r14 r15 xmm1
-
-/* Scalar math fucntion call
- * to process special input
- */
+	incl	%r12d
+	cmpl	$4, %r12d
+
+	/* Check bits in range mask */
+	jl	L(RANGEMASK_CHECK)
+	# LOE rbx rbp r15 r12d r13d
+
+	movq	16(%rsp), %r12
+	cfi_restore(12)
+	movq	8(%rsp), %r13
+	cfi_restore(13)
+	movq	(%rsp), %r14
+	cfi_restore(14)
+	movups	48(%rsp), %xmm1
+
+	/* Go to exit */
+	jmp	L(EXIT)
+	cfi_offset(12, -64)
+	cfi_offset(13, -72)
+	cfi_offset(14, -80)
+	# LOE rbx rbp r12 r13 r14 r15 xmm1
+
+	/* Scalar math fucntion call
+	 * to process special input
+	 */
 L(SCALAR_MATH_CALL):
-	movl      %r12d, %r14d
-	movss     32(%rsp,%r14,4), %xmm0
-	call      exp2f@PLT
-	# LOE rbx rbp r14 r15 r12d r13d xmm0
+	movl	%r12d, %r14d
+	movss	32(%rsp, %r14, 4), %xmm0
+	call	exp2f@PLT
+	# LOE rbx rbp r14 r15 r12d r13d xmm0
 
-	movss     %xmm0, 48(%rsp,%r14,4)
+	movss	%xmm0, 48(%rsp, %r14, 4)
 
-/* Process special inputs in loop */
-	jmp       L(SPECIAL_VALUES_LOOP)
-	# LOE rbx rbp r15 r12d r13d
+	/* Process special inputs in loop */
+	jmp	L(SPECIAL_VALUES_LOOP)
+	# LOE rbx rbp r15 r12d r13d
 END(_ZGVbN4v_exp2f_sse4)
 
-	.section .rodata, "a"
-	.align 16
+	.section .rodata, "a"
+	.align 16
 
 #ifdef __svml_sexp2_data_internal_typedef
 typedef unsigned int VUINT32;
-typedef struct
-{
-	__declspec(align(16)) VUINT32 _sShifter[4][1];
-	__declspec(align(16)) VUINT32 _sPC0[4][1];
-	__declspec(align(16)) VUINT32 _sPC1[4][1];
-	__declspec(align(16)) VUINT32 _sPC2[4][1];
-	__declspec(align(16)) VUINT32 _sPC3[4][1];
-	__declspec(align(16)) VUINT32 _sPC4[4][1];
-	__declspec(align(16)) VUINT32 _sPC5[4][1];
-	__declspec(align(16)) VUINT32 _sPC6[4][1];
-	__declspec(align(16)) VUINT32 _iAbsMask[4][1];
-	__declspec(align(16)) VUINT32 _iDomainRange[4][1];
+typedef struct {
+	__declspec(align(16)) VUINT32 _sShifter[4][1];
+	__declspec(align(16)) VUINT32 _sPC0[4][1];
+	__declspec(align(16)) VUINT32 _sPC1[4][1];
+	__declspec(align(16)) VUINT32 _sPC2[4][1];
+	__declspec(align(16)) VUINT32 _sPC3[4][1];
+	__declspec(align(16)) VUINT32 _sPC4[4][1];
+	__declspec(align(16)) VUINT32 _sPC5[4][1];
+	__declspec(align(16)) VUINT32 _sPC6[4][1];
+	__declspec(align(16)) VUINT32 _iAbsMask[4][1];
+	__declspec(align(16)) VUINT32 _iDomainRange[4][1];
 } __svml_sexp2_data_internal;
 #endif
 __svml_sexp2_data_internal:
-	.long 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000 /* _sShifter */
-	.align 16
-	.long 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000 /* _sPC0 */
-	.align 16
-	.long 0x3f317218, 0x3f317218, 0x3f317218, 0x3f317218 /* _sPC1 */
-	.align 16
-	.long 0x3e75fdef, 0x3e75fdef, 0x3e75fdef, 0x3e75fdef /* _sPC2 */
-	.align 16
-	.long 0x3d6357cf, 0x3d6357cf, 0x3d6357cf, 0x3d6357cf /* _sPC3 */
-	.align 16
-	.long 0x3c1d962c, 0x3c1d962c, 0x3c1d962c, 0x3c1d962c /* _sPC4 */
-	.align 16
-	.long 0x3aaf7a51, 0x3aaf7a51, 0x3aaf7a51, 0x3aaf7a51 /* _sPC5 */
-	.align 16
-	.long 0x39213c8c, 0x39213c8c, 0x39213c8c, 0x39213c8c /* _sPC6 */
-//common
-	.align 16
-	.long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff /* _iAbsMask */
-	.align 16
-	.long 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000 /* _iDomainRange=126.0 */
-	.align 16
-	.type __svml_sexp2_data_internal,@object
-	.size __svml_sexp2_data_internal,.-__svml_sexp2_data_internal
+	.long 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000 /* _sShifter */
+	.align 16
+	.long 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000 /* _sPC0 */
+	.align 16
+	.long 0x3f317218, 0x3f317218, 0x3f317218, 0x3f317218 /* _sPC1 */
+	.align 16
+	.long 0x3e75fdef, 0x3e75fdef, 0x3e75fdef, 0x3e75fdef /* _sPC2 */
+	.align 16
+	.long 0x3d6357cf, 0x3d6357cf, 0x3d6357cf, 0x3d6357cf /* _sPC3 */
+	.align 16
+	.long 0x3c1d962c, 0x3c1d962c, 0x3c1d962c, 0x3c1d962c /* _sPC4 */
+	.align 16
+	.long 0x3aaf7a51, 0x3aaf7a51, 0x3aaf7a51, 0x3aaf7a51 /* _sPC5 */
+	.align 16
+	.long 0x39213c8c, 0x39213c8c, 0x39213c8c, 0x39213c8c /* _sPC6 */
+	//common
+	.align 16
+	.long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff /* _iAbsMask */
+	.align 16
+	.long 0x42fc0000, 0x42fc0000, 0x42fc0000, 0x42fc0000 /* _iDomainRange=126.0 */
+	.align 16
+	.type __svml_sexp2_data_internal, @object
+	.size __svml_sexp2_data_internal, .-__svml_sexp2_data_internal
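
Note on the algorithm above: in this SSE4 variant there is no 2^j/K table in the data section, so effectively K = 1 and the kernel reduces to N = round(x), R = x - N, 2^R via the degree-6 polynomial _sPC0.._sPC6 (the mulps/addps Horner ladder), and scaling by 2^N through a direct add into the exponent field (pslld $23 followed by paddd). A minimal scalar C model of one lane is sketched below; it is illustrative only, not glibc code. The helpers as_float/as_bits are hypothetical, and the coefficient bit patterns are copied from the __svml_sexp2_data_internal table in the diff.

#include <stdint.h>
#include <string.h>

/* Bit patterns of _sPC0.._sPC6 from __svml_sexp2_data_internal.  */
static const uint32_t pc_bits[7] = {
  0x3F800000, 0x3f317218, 0x3e75fdef, 0x3d6357cf,
  0x3c1d962c, 0x3aaf7a51, 0x39213c8c
};

static float
as_float (uint32_t u)
{
  float f;
  memcpy (&f, &u, sizeof f);
  return f;
}

static uint32_t
as_bits (float f)
{
  uint32_t u;
  memcpy (&u, &f, sizeof u);
  return u;
}

/* One lane of the SSE4 kernel in scalar form (illustrative sketch).  */
static float
exp2f_lane (float x)
{
  const float shifter = as_float (0x4b400000);	/* _sShifter = 1.5*2^23 */

  /* Adding 1.5*2^23 encodes round(x) in the low mantissa bits of the
     sum (addps %xmm0, %xmm5).  */
  float s = x + shifter;
  uint32_t n_exp = as_bits (s) << 23;	/* 2^N bits (pslld $23) */
  float n = s - shifter;		/* N = round(x) */
  float r = x - n;			/* R in [-0.5, 0.5] */

  /* Horner evaluation of 2^R = PC0 + R*(PC1 + ... + R*PC6).  */
  float p = as_float (pc_bits[6]);
  for (int i = 5; i >= 0; i--)
    p = p * r + as_float (pc_bits[i]);

  /* Reconstruction: paddd adds N straight into the exponent field,
     i.e. multiplies the polynomial value by 2^N.  */
  return as_float (as_bits (p) + n_exp);
}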
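The special-values path maps to C the same way: movmskps packs the per-lane pcmpgtd results into a bit mask, and each set bit sends that one lane back through scalar exp2f (the btl / call exp2f@PLT loop).  Like the assembly, the fast path is computed for every lane first and out-of-range lanes are patched afterwards.  A sketch continuing the model above (reusing as_bits and exp2f_lane; the exp2f4 driver name is hypothetical):

#include <math.h>
#include <stdio.h>

/* Four-lane driver mirroring _ZGVbN4v_exp2f_sse4 (illustrative).  */
static void
exp2f4 (const float x[4], float y[4])
{
  int mask = 0;				/* movmskps result */

  for (int i = 0; i < 4; i++)
    {
      y[i] = exp2f_lane (x[i]);
      /* pand + pcmpgtd: compare the |x| bit pattern (_iAbsMask)
	 against _iDomainRange (126.0f).  NaN and Inf bit patterns
	 also compare greater, so they take the scalar path too.  */
      if ((as_bits (x[i]) & 0x7fffffff) > 0x42fc0000)
	mask |= 1 << i;
    }

  for (int i = 0; i < 4; i++)		/* SPECIAL_VALUES_LOOP */
    if (mask & (1 << i))
      y[i] = exp2f (x[i]);		/* call exp2f@PLT */
}

int
main (void)
{
  float x[4] = { 0.5f, -1.0f, 10.0f, 200.0f };	/* last lane overflows */
  float y[4];
  exp2f4 (x, y);
  for (int i = 0; i < 4; i++)
    printf ("exp2(%g) = %g\n", (double) x[i], (double) y[i]);
  return 0;
}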