author    Andreas Jaeger <jaegerandi@gmail.com>    2012-05-09 20:17:21 +0200
committer Andreas Jaeger <jaegerandi@gmail.com>    2012-05-09 20:17:21 +0200
commit    bdd74070cc94ca50f1096808977268981308d7d6 (patch)
tree      0fb127e5c545095be68437660edf024ee5e94bf4 /sysdeps/x86_64
parent    91d8d69ec6bc407af06125c4c98f70e3a43f3b38 (diff)
Add volatiles for x86-64 bits/mathinline.h
[BZ #14053] GCC 4.7 might remove consecutive calls to e.g. lrintf, since the assembler instructions are the same and GCC does not know that the result differs depending on the rounding mode. For SSE instructions, the control register is not available, so there is no way to inform GCC about this. Therefore the asms are marked as volatile.
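For context, a minimal sketch of the failure mode described above (not part of the commit; it assumes the inline lrintf from mathinline.h is being used and that the compiler honors fesetround, i.e. no -ffast-math):

#include <fenv.h>
#include <math.h>
#include <stdio.h>

int
main (void)
{
  float x = 0.5f;

  fesetround (FE_DOWNWARD);
  long int down = lrintf (x);   /* should be 0 */

  fesetround (FE_UPWARD);
  long int up = lrintf (x);     /* should be 1, but with a non-volatile
                                   inline asm GCC may reuse the first
                                   result, since both asms look identical */

  printf ("%ld %ld\n", down, up);
  return 0;
}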
Diffstat (limited to 'sysdeps/x86_64')
-rw-r--r--  sysdeps/x86_64/fpu/bits/mathinline.h | 48
1 file changed, 40 insertions(+), 8 deletions(-)
diff --git a/sysdeps/x86_64/fpu/bits/mathinline.h b/sysdeps/x86_64/fpu/bits/mathinline.h
index c072f16a21..49a199b60b 100644
--- a/sysdeps/x86_64/fpu/bits/mathinline.h
+++ b/sysdeps/x86_64/fpu/bits/mathinline.h
@@ -79,7 +79,11 @@ __MATH_INLINE long int
__NTH (lrintf (float __x))
{
long int __res;
- __asm ("cvtss2si %1, %0" : "=r" (__res) : "xm" (__x));
+ /* Mark as volatile since the result is dependent on the state of
+ the SSE control register (the rounding mode). Otherwise GCC might
+ remove these assembler instructions since it does not know about
+ the rounding mode change and cannot currently be told. */
+ __asm __volatile__ ("cvtss2si %1, %0" : "=r" (__res) : "xm" (__x));
return __res;
}
# endif
@@ -88,7 +92,11 @@ __MATH_INLINE long int
__NTH (lrint (double __x))
{
long int __res;
- __asm ("cvtsd2si %1, %0" : "=r" (__res) : "xm" (__x));
+ /* Mark as volatile since the result is dependent on the state of
+ the SSE control register (the rounding mode). Otherwise GCC might
+ remove these assembler instructions since it does not know about
+ the rounding mode change and cannot currently be told. */
+ __asm __volatile__ ("cvtsd2si %1, %0" : "=r" (__res) : "xm" (__x));
return __res;
}
# endif
@@ -97,14 +105,22 @@ __MATH_INLINE long long int
__NTH (llrintf (float __x))
{
long long int __res;
- __asm ("cvtss2si %1, %0" : "=r" (__res) : "xm" (__x));
+ /* Mark as volatile since the result is dependent on the state of
+ the SSE control register (the rounding mode). Otherwise GCC might
+ remove these assembler instructions since it does not know about
+ the rounding mode change and cannot currently be told. */
+ __asm __volatile__ ("cvtss2si %1, %0" : "=r" (__res) : "xm" (__x));
return __res;
}
__MATH_INLINE long long int
__NTH (llrint (double __x))
{
long long int __res;
- __asm ("cvtsd2si %1, %0" : "=r" (__res) : "xm" (__x));
+ /* Mark as volatile since the result is dependent on the state of
+ the SSE control register (the rounding mode). Otherwise GCC might
+ remove these assembler instructions since it does not know about
+ the rounding mode change and cannot currently be told. */
+ __asm __volatile__ ("cvtsd2si %1, %0" : "=r" (__res) : "xm" (__x));
return __res;
}
# endif
@@ -176,14 +192,22 @@ __MATH_INLINE double
__NTH (rint (double __x))
{
double __res;
- __asm ("roundsd $4, %1, %0" : "=x" (__res) : "xm" (__x));
+ /* Mark as volatile since the result is dependent on the state of
+ the SSE control register (the rounding mode). Otherwise GCC might
+ remove these assembler instructions since it does not know about
+ the rounding mode change and cannot currently be told. */
+ __asm __volatile__ ("roundsd $4, %1, %0" : "=x" (__res) : "xm" (__x));
return __res;
}
__MATH_INLINE float
__NTH (rintf (float __x))
{
float __res;
- __asm ("roundss $4, %1, %0" : "=x" (__res) : "xm" (__x));
+ /* Mark as volatile since the result is dependent on the state of
+ the SSE control register (the rounding mode). Otherwise GCC might
+ remove these assembler instructions since it does not know about
+ the rounding mode change and cannot currently be told. */
+ __asm __volatile__ ("roundss $4, %1, %0" : "=x" (__res) : "xm" (__x));
return __res;
}
@@ -193,14 +217,22 @@ __MATH_INLINE double
__NTH (nearbyint (double __x))
{
double __res;
- __asm ("roundsd $0xc, %1, %0" : "=x" (__res) : "xm" (__x));
+ /* Mark as volatile since the result is dependent on the state of
+ the SSE control register (the rounding mode). Otherwise GCC might
+ remove these assembler instructions since it does not know about
+ the rounding mode change and cannot currently be told. */
+ __asm __volatile__ ("roundsd $0xc, %1, %0" : "=x" (__res) : "xm" (__x));
return __res;
}
__MATH_INLINE float
__NTH (nearbyintf (float __x))
{
float __res;
- __asm ("roundss $0xc, %1, %0" : "=x" (__res) : "xm" (__x));
+ /* Mark as volatile since the result is dependent on the state of
+ the SSE control register (the rounding mode). Otherwise GCC might
+ remove these assembler instructions since it does not know about
+ the rounding mode change and cannot currently be told. */
+ __asm __volatile__ ("roundss $0xc, %1, %0" : "=x" (__res) : "xm" (__x));
return __res;
}
# endif
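
The same idiom can be exercised outside the glibc headers. The reduction below is only a sketch (my_lrintf and the test scaffolding are not from the patch); the __volatile__ qualifier is what keeps both cvtss2si instructions in the generated code, which can be checked by comparing gcc -O2 -S output with and without it:

#include <fenv.h>
#include <stdio.h>

/* Reduced stand-in for the inline lrintf above.  Without __volatile__,
   GCC may merge the two identical asm statements in main and return the
   same value for both calls.  */
static inline long int
my_lrintf (float x)
{
  long int res;
  __asm __volatile__ ("cvtss2si %1, %0" : "=r" (res) : "xm" (x));
  return res;
}

int
main (void)
{
  fesetround (FE_DOWNWARD);
  long int down = my_lrintf (0.5f);
  fesetround (FE_UPWARD);
  long int up = my_lrintf (0.5f);
  printf ("%ld %ld\n", down, up);   /* 0 1 with the volatile in place */
  return 0;
}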