Diffstat (limited to 'sysdeps/x86_64/fpu/math_private.h')
-rw-r--r--  sysdeps/x86_64/fpu/math_private.h  38
1 file changed, 8 insertions, 30 deletions
diff --git a/sysdeps/x86_64/fpu/math_private.h b/sysdeps/x86_64/fpu/math_private.h
index 50f765ff2f..8e79718262 100644
--- a/sysdeps/x86_64/fpu/math_private.h
+++ b/sysdeps/x86_64/fpu/math_private.h
@@ -19,7 +19,7 @@
/* We can do a few things better on x86-64. */
-#ifdef __AVX__
+#if defined __AVX__ || defined SSE2AVX
# define MOVD "vmovd"
# define STMXCSR "vstmxcsr"
# define LDMXCSR "vldmxcsr"
@@ -90,7 +90,7 @@
({ int __di; GET_FLOAT_WORD (__di, (float) d); \
(__di & 0x7fffffff) < 0x7f800000; })
-#ifdef __AVX__
+#if defined __AVX__ || defined SSE2AVX
# define __ieee754_sqrt(d) \
({ double __res; \
asm ("vsqrtsd %1, %0, %0" : "=x" (__res) : "xm" ((double) (d))); \
@@ -116,7 +116,7 @@
#ifdef __SSE4_1__
# ifndef __rint
-# ifdef __AVX__
+# if defined __AVX__ || defined SSE2AVX
# define __rint(d) \
({ double __res; \
asm ("vroundsd $4, %1, %0, %0" : "=x" (__res) : "xm" ((double) (d))); \
@@ -129,7 +129,7 @@
# endif
# endif
# ifndef __rintf
-# ifdef __AVX__
+# if defined __AVX__ || defined SSE2AVX
# define __rintf(d) \
({ float __res; \
asm ("vroundss $4, %1, %0, %0" : "=x" (__res) : "xm" ((float) (d))); \
@@ -143,7 +143,7 @@
# endif
# ifndef __floor
-# ifdef __AVX__
+# if defined __AVX__ || defined SSE2AVX
# define __floor(d) \
({ double __res; \
asm ("vroundsd $1, %1, %0, %0" : "=x" (__res) : "xm" ((double) (d))); \
@@ -156,7 +156,7 @@
# endif
# endif
# ifndef __floorf
-# ifdef __AVX__
+# if defined __AVX__ || defined SSE2AVX
# define __floorf(d) \
({ float __res; \
asm ("vroundss $1, %1, %0, %0" : "=x" (__res) : "xm" ((float) (d))); \
@@ -173,29 +173,6 @@
/* Specialized variants of the <fenv.h> interfaces which only handle
either the FPU or the SSE unit. */
-#undef libc_fegetround
-#define libc_fegetround() \
- ({ \
- unsigned int mxcsr; \
- asm volatile (STMXCSR " %0" : "=m" (*&mxcsr)); \
- (mxcsr & 0x6000) >> 3; \
- })
-#undef libc_fegetroundf
-#define libc_fegetroundf() libc_fegetround ()
-// #define libc_fegetroundl() fegetround ()
-
-#undef libc_fesetround
-#define libc_fesetround(r) \
- do { \
- unsigned int mxcsr; \
- asm (STMXCSR " %0" : "=m" (*&mxcsr)); \
- mxcsr = (mxcsr & ~0x6000) | ((r) << 3); \
- asm volatile (LDMXCSR " %0" : : "m" (*&mxcsr)); \
- } while (0)
-#undef libc_fesetroundf
-#define libc_fesetroundf(r) libc_fesetround (r)
-// #define libc_fesetroundl(r) (void) fesetround (r)
-
#undef libc_feholdexcept
#define libc_feholdexcept(e) \
do { \
@@ -224,7 +201,8 @@
#undef libc_fetestexcept
#define libc_fetestexcept(e) \
- ({ unsigned int mxcsr; asm volatile (STMXCSR " %0" : "=m" (*&mxcsr)); \
+ ({ unsigned int mxcsr; \
+ asm volatile (STMXCSR " %0" : "=m" (*&mxcsr)); \
mxcsr & (e) & FE_ALL_EXCEPT; })
#undef libc_fetestexceptf
#define libc_fetestexceptf(e) libc_fetestexcept (e)
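
For context, the "#if defined __AVX__ || defined SSE2AVX" conditionals above pick the VEX-encoded forms of the inline-asm helpers; SSE2AVX is the macro glibc defines when building the -msse2avx multiarch variants of the libm files. Below is a minimal standalone sketch of the __ieee754_sqrt macro; my_sqrt is a hypothetical name standing in for the glibc-internal one, and the AVX branch is only compiled when the translation unit is built with -mavx or -msse2avx -DSSE2AVX.

/* Sketch: the __ieee754_sqrt statement expression from the patched
   header, reduced to a standalone test program.  */
#include <stdio.h>

#if defined __AVX__ || defined SSE2AVX
# define my_sqrt(d) \
  ({ double __res; \
     asm ("vsqrtsd %1, %0, %0" : "=x" (__res) : "xm" ((double) (d))); \
     __res; })
#else
# define my_sqrt(d) \
  ({ double __res; \
     asm ("sqrtsd %1, %0" : "=x" (__res) : "xm" ((double) (d))); \
     __res; })
#endif

int
main (void)
{
  printf ("sqrt (2.0) = %.17g\n", my_sqrt (2.0));
  return 0;
}

Built without AVX, the statement expression falls back to the plain sqrtsd encoding, which mirrors the header's unmodified non-AVX branch.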
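
The removed libc_fegetround / libc_fesetround macros manipulate the MXCSR rounding-control field directly: bits 13-14 of MXCSR hold the rounding mode, while the x86 FE_* constants occupy bits 10-11, hence the << 3 / >> 3 conversions around the 0x6000 mask. The reformatted libc_fetestexcept at the end reads the MXCSR exception flags the same way. Here is a minimal sketch of both, assuming the _mm_getcsr/_mm_setcsr intrinsics from <xmmintrin.h> in place of the STMXCSR/LDMXCSR inline asm; the sketch_* names are illustrative and are not glibc interfaces.

#include <fenv.h>        /* FE_TONEAREST, FE_TOWARDZERO, FE_INEXACT, ...  */
#include <stdio.h>
#include <xmmintrin.h>   /* _mm_getcsr, _mm_setcsr  */

/* MXCSR bits 13-14 are the rounding control; the x86 <fenv.h> rounding
   constants live three bits lower, which is what the removed macros'
   shifts by 3 account for.  */
#define MXCSR_RM_MASK 0x6000u

static int
sketch_fegetround (void)
{
  return (_mm_getcsr () & MXCSR_RM_MASK) >> 3;
}

static void
sketch_fesetround (int r)
{
  unsigned int mxcsr = _mm_getcsr ();
  _mm_setcsr ((mxcsr & ~MXCSR_RM_MASK) | ((unsigned int) r << 3));
}

/* MXCSR's low bits are the exception flags; masking with FE_ALL_EXCEPT
   keeps only the standard exceptions, as libc_fetestexcept does.  */
static int
sketch_fetestexcept (int e)
{
  return _mm_getcsr () & e & FE_ALL_EXCEPT;
}

int
main (void)
{
  sketch_fesetround (FE_TOWARDZERO);
  printf ("rounding mode %#x (FE_TOWARDZERO = %#x)\n",
          (unsigned int) sketch_fegetround (), (unsigned int) FE_TOWARDZERO);
  sketch_fesetround (FE_TONEAREST);

  volatile double x = 1.0, y = 3.0, z = x / y;   /* inexact SSE division  */
  printf ("z = %g, inexact raised: %d\n",
          (double) z, sketch_fetestexcept (FE_INEXACT) != 0);
  return 0;
}

Like the removed macros, this touches only the SSE unit and never the x87 control word, which is the restriction the header's comment about handling "either the FPU or the SSE unit" refers to.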