author     Joseph Myers <joseph@codesourcery.com>   2018-09-14 13:09:01 +0000
committer  Joseph Myers <joseph@codesourcery.com>   2018-09-14 13:09:01 +0000
commit     e44acb20633ab079da00ff0b29d7a5fe624525bc
tree       c781c1a4c54d31adf7a14cbca09b9724c5c55f6e /math
parent     221e4babca17b363df2c56e839572e9f7ab7d127
Use floor functions not __floor functions in glibc libm.
Similar to the changes that were made to call sqrt functions directly in glibc, instead of __ieee754_sqrt variants, so that the compiler could inline them automatically without needing special inline definitions in lots of math_private.h headers, this patch makes libm code call floor functions directly instead of __floor variants, removing the inlines / macros for x86_64 (SSE4.1) and powerpc (POWER5+).

The redirection used to ensure that __ieee754_sqrt does still get called when the compiler doesn't inline a built-in function expansion is refactored so it can be applied to other functions; the refactoring is arranged so it's not limited to unary functions either (it would be reasonable to use this mechanism for copysign - removing the inline in math_private_calls.h but also eliminating unnecessary local PLT entry use in the cases (powerpc soft-float and e500v1, for IBM long double) where copysign calls don't get inlined).

The point of this change is that more architectures can get floor calls inlined where they weren't previously (AArch64, for example), without needing special inline definitions in their math_private.h, and existing such definitions in math_private.h headers can be removed.

Note that it's possible that in some cases an inline may be used where an IFUNC call was previously used - this is the case on x86_64, for example.  I think the direct calls to floor are still appropriate; if there's any significant performance cost from inline SSE2 floor instead of an IFUNC call ending up with SSE4.1 floor, that indicates that either the function should be doing something else that's faster than using floor at all, or it should itself have IFUNC variants, or that the compiler choice of inlining for generic tuning should change to allow for the possibility that, by not inlining, an SSE4.1 IFUNC might be called at runtime - but not that glibc should avoid calling floor internally.  (After all, all the same considerations would apply to any user program calling floor, where it might either be inlined or left as an out-of-line call allowing for a possible IFUNC.)

Tested for x86_64, and with build-many-glibcs.py.

* include/math.h [!_ISOMAC && !(__FINITE_MATH_ONLY__ && __FINITE_MATH_ONLY__ > 0) && !NO_MATH_REDIRECT] (MATH_REDIRECT): New macro.
[!_ISOMAC && !(__FINITE_MATH_ONLY__ && __FINITE_MATH_ONLY__ > 0) && !NO_MATH_REDIRECT] (MATH_REDIRECT_LDBL): Likewise.
[!_ISOMAC && !(__FINITE_MATH_ONLY__ && __FINITE_MATH_ONLY__ > 0) && !NO_MATH_REDIRECT] (MATH_REDIRECT_F128): Likewise.
[!_ISOMAC && !(__FINITE_MATH_ONLY__ && __FINITE_MATH_ONLY__ > 0) && !NO_MATH_REDIRECT] (MATH_REDIRECT_UNARY_ARGS): Likewise.
[!_ISOMAC && !(__FINITE_MATH_ONLY__ && __FINITE_MATH_ONLY__ > 0) && !NO_MATH_REDIRECT] (sqrt): Redirect using MATH_REDIRECT.
[!_ISOMAC && !(__FINITE_MATH_ONLY__ && __FINITE_MATH_ONLY__ > 0) && !NO_MATH_REDIRECT] (floor): Likewise.
* sysdeps/aarch64/fpu/s_floor.c: Define NO_MATH_REDIRECT before header inclusion.
* sysdeps/aarch64/fpu/s_floorf.c: Likewise.
* sysdeps/ieee754/dbl-64/s_floor.c: Likewise.
* sysdeps/ieee754/dbl-64/wordsize-64/s_floor.c: Likewise.
* sysdeps/ieee754/float128/s_floorf128.c: Likewise.
* sysdeps/ieee754/flt-32/s_floorf.c: Likewise.
* sysdeps/ieee754/ldbl-128/s_floorl.c: Likewise.
* sysdeps/ieee754/ldbl-128ibm/s_floorl.c: Likewise.
* sysdeps/m68k/m680x0/fpu/s_floor_template.c: Likewise.
* sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floor.c: Likewise.
* sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floorf.c: Likewise.
* sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor.c: Likewise.
* sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf.c: Likewise.
* sysdeps/riscv/rv64/rvd/s_floor.c: Likewise.
* sysdeps/riscv/rvf/s_floorf.c: Likewise.
* sysdeps/sparc/sparc64/fpu/multiarch/s_floor.c: Likewise.
* sysdeps/sparc/sparc64/fpu/multiarch/s_floorf.c: Likewise.
* sysdeps/x86_64/fpu/multiarch/s_floor.c: Likewise.
* sysdeps/x86_64/fpu/multiarch/s_floorf.c: Likewise.
* sysdeps/powerpc/fpu/math_private.h [_ARCH_PWR5X] (__floor): Remove macro.
[_ARCH_PWR5X] (__floorf): Likewise.
* sysdeps/x86_64/fpu/math_private.h [__SSE4_1__] (__floor): Remove inline function.
[__SSE4_1__] (__floorf): Likewise.
* math/w_lgamma_main.c (LGFUNC (__lgamma)): Use floor functions instead of __floor variants.
* math/w_lgamma_r_compat.c (__lgamma_r): Likewise.
* math/w_lgammaf_main.c (LGFUNC (__lgammaf)): Likewise.
* math/w_lgammaf_r_compat.c (__lgammaf_r): Likewise.
* math/w_lgammal_main.c (LGFUNC (__lgammal)): Likewise.
* math/w_lgammal_r_compat.c (__lgammal_r): Likewise.
* math/w_tgamma_compat.c (__tgamma): Likewise.
* math/w_tgamma_template.c (M_DECL_FUNC (__tgamma)): Likewise.
* math/w_tgammaf_compat.c (__tgammaf): Likewise.
* math/w_tgammal_compat.c (__tgammal): Likewise.
* sysdeps/ieee754/dbl-64/e_lgamma_r.c (sin_pi): Likewise.
* sysdeps/ieee754/dbl-64/k_rem_pio2.c (__kernel_rem_pio2): Likewise.
* sysdeps/ieee754/dbl-64/lgamma_neg.c (__lgamma_neg): Likewise.
* sysdeps/ieee754/flt-32/e_lgammaf_r.c (sin_pif): Likewise.
* sysdeps/ieee754/flt-32/lgamma_negf.c (__lgamma_negf): Likewise.
* sysdeps/ieee754/ldbl-128/e_lgammal_r.c (__ieee754_lgammal_r): Likewise.
* sysdeps/ieee754/ldbl-128/e_powl.c (__ieee754_powl): Likewise.
* sysdeps/ieee754/ldbl-128/lgamma_negl.c (__lgamma_negl): Likewise.
* sysdeps/ieee754/ldbl-128/s_expm1l.c (__expm1l): Likewise.
* sysdeps/ieee754/ldbl-128ibm/e_lgammal_r.c (__ieee754_lgammal_r): Likewise.
* sysdeps/ieee754/ldbl-128ibm/e_powl.c (__ieee754_powl): Likewise.
* sysdeps/ieee754/ldbl-128ibm/lgamma_negl.c (__lgamma_negl): Likewise.
* sysdeps/ieee754/ldbl-128ibm/s_expm1l.c (__expm1l): Likewise.
* sysdeps/ieee754/ldbl-128ibm/s_truncl.c (__truncl): Likewise.
* sysdeps/ieee754/ldbl-96/e_lgammal_r.c (sin_pi): Likewise.
* sysdeps/ieee754/ldbl-96/lgamma_negl.c (__lgamma_negl): Likewise.
* sysdeps/powerpc/power5+/fpu/s_modf.c (__modf): Likewise.
* sysdeps/powerpc/power5+/fpu/s_modff.c (__modff): Likewise.
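
The asm-label redirect described above can be illustrated with a small stand-alone sketch.  The names my_floor and __my_floor below are hypothetical stand-ins chosen so the example compiles and links outside glibc; in glibc itself the redirect in include/math.h maps floor to __floor (and sqrt to __ieee754_sqrt), the real MATH_REDIRECT macro also covers the float, long double and _Float128 variants via MATH_REDIRECT_LDBL and MATH_REDIRECT_F128, and implementation files such as sysdeps/ieee754/dbl-64/s_floor.c define NO_MATH_REDIRECT before their header inclusion so their own definitions are not renamed.  The inlining half of the story (GCC expanding floor as a built-in) is not shown here, only the fall-back redirect.  asm labels are a GNU extension, so build with GCC, e.g. "gcc -O2 sketch.c -lm".

#include <math.h>
#include <stdio.h>

/* Sketch of the redirect: when NO_MATH_REDIRECT is not defined, redeclare
   the public name with an asm label so that any call the compiler does not
   inline is emitted against the internal symbol instead.  In glibc the
   corresponding macro lives in include/math.h and is also guarded by
   !_ISOMAC and !__FINITE_MATH_ONLY__.  */
#ifndef NO_MATH_REDIRECT
# define MATH_REDIRECT(FUNC, PREFIX) \
  extern double FUNC (double) asm (PREFIX #FUNC);
MATH_REDIRECT (my_floor, "__")   /* my_floor -> __my_floor  */
#endif

/* Stand-in for the implementation file (cf. s_floor.c): it provides the
   internal symbol.  In glibc such a file defines NO_MATH_REDIRECT before
   its #includes so the redirect above does not apply to its own
   definition.  */
double
__my_floor (double x)
{
  return floor (x);
}

/* A caller simply writes my_floor (x); because of the asm label, the
   emitted call binds to __my_floor rather than to the public name.  */
int
main (void)
{
  printf ("%g\n", my_floor (2.5));   /* prints 2 */
  return 0;
}

Because the redirect is only a declaration-level rename, it costs nothing when the compiler does inline the built-in; it only changes which symbol a non-inlined call binds to.
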
Diffstat (limited to 'math')
-rw-r--r--  math/w_lgamma_main.c       2
-rw-r--r--  math/w_lgamma_r_compat.c   2
-rw-r--r--  math/w_lgammaf_main.c      2
-rw-r--r--  math/w_lgammaf_r_compat.c  2
-rw-r--r--  math/w_lgammal_main.c      2
-rw-r--r--  math/w_lgammal_r_compat.c  2
-rw-r--r--  math/w_tgamma_compat.c     2
-rw-r--r--  math/w_tgamma_template.c   2
-rw-r--r--  math/w_tgammaf_compat.c    2
-rw-r--r--  math/w_tgammal_compat.c    2
10 files changed, 10 insertions, 10 deletions
diff --git a/math/w_lgamma_main.c b/math/w_lgamma_main.c
index ac91b9d9e6..573bcb88fb 100644
--- a/math/w_lgamma_main.c
+++ b/math/w_lgamma_main.c
@@ -31,7 +31,7 @@ LGFUNC (__lgamma) (double x)
if(__builtin_expect(!isfinite(y), 0)
&& isfinite(x) && _LIB_VERSION != _IEEE_)
return __kernel_standard(x, x,
- __floor(x)==x&&x<=0.0
+ floor(x)==x&&x<=0.0
? 15 /* lgamma pole */
: 14); /* lgamma overflow */
diff --git a/math/w_lgamma_r_compat.c b/math/w_lgamma_r_compat.c
index 5b3ceaa2ea..c68ce67174 100644
--- a/math/w_lgamma_r_compat.c
+++ b/math/w_lgamma_r_compat.c
@@ -28,7 +28,7 @@ __lgamma_r(double x, int *signgamp)
if(__builtin_expect(!isfinite(y), 0)
&& isfinite(x) && _LIB_VERSION != _IEEE_)
return __kernel_standard(x, x,
- __floor(x)==x&&x<=0.0
+ floor(x)==x&&x<=0.0
? 15 /* lgamma pole */
: 14); /* lgamma overflow */
diff --git a/math/w_lgammaf_main.c b/math/w_lgammaf_main.c
index ef6065994f..2a85e3cadf 100644
--- a/math/w_lgammaf_main.c
+++ b/math/w_lgammaf_main.c
@@ -28,7 +28,7 @@ LGFUNC (__lgammaf) (float x)
if(__builtin_expect(!isfinite(y), 0)
&& isfinite(x) && _LIB_VERSION != _IEEE_)
return __kernel_standard_f(x, x,
- __floorf(x)==x&&x<=0.0f
+ floorf(x)==x&&x<=0.0f
? 115 /* lgamma pole */
: 114); /* lgamma overflow */
diff --git a/math/w_lgammaf_r_compat.c b/math/w_lgammaf_r_compat.c
index c7751f13f5..78fc4fcb23 100644
--- a/math/w_lgammaf_r_compat.c
+++ b/math/w_lgammaf_r_compat.c
@@ -31,7 +31,7 @@ __lgammaf_r(float x, int *signgamp)
if(__builtin_expect(!isfinite(y), 0)
&& isfinite(x) && _LIB_VERSION != _IEEE_)
return __kernel_standard_f(x, x,
- __floorf(x)==x&&x<=0.0f
+ floorf(x)==x&&x<=0.0f
? 115 /* lgamma pole */
: 114); /* lgamma overflow */
diff --git a/math/w_lgammal_main.c b/math/w_lgammal_main.c
index f269cef6ab..04440cd29f 100644
--- a/math/w_lgammal_main.c
+++ b/math/w_lgammal_main.c
@@ -35,7 +35,7 @@ LGFUNC (__lgammal) (long double x)
if(__builtin_expect(!isfinite(y), 0)
&& isfinite(x) && _LIB_VERSION != _IEEE_)
return __kernel_standard_l(x, x,
- __floorl(x)==x&&x<=0.0L
+ floorl(x)==x&&x<=0.0L
? 215 /* lgamma pole */
: 214); /* lgamma overflow */
diff --git a/math/w_lgammal_r_compat.c b/math/w_lgammal_r_compat.c
index 09a8070b46..6ebfa41333 100644
--- a/math/w_lgammal_r_compat.c
+++ b/math/w_lgammal_r_compat.c
@@ -32,7 +32,7 @@ __lgammal_r(long double x, int *signgamp)
if(__builtin_expect(!isfinite(y), 0)
&& isfinite(x) && _LIB_VERSION != _IEEE_)
return __kernel_standard(x, x,
- __floorl(x)==x&&x<=0.0
+ floorl(x)==x&&x<=0.0
? 215 /* lgamma pole */
: 214); /* lgamma overflow */
diff --git a/math/w_tgamma_compat.c b/math/w_tgamma_compat.c
index 219aa10862..910d2fe490 100644
--- a/math/w_tgamma_compat.c
+++ b/math/w_tgamma_compat.c
@@ -33,7 +33,7 @@ __tgamma(double x)
&& _LIB_VERSION != _IEEE_) {
if (x == 0.0)
return __kernel_standard(x,x,50); /* tgamma pole */
- else if(__floor(x)==x&&x<0.0)
+ else if(floor(x)==x&&x<0.0)
return __kernel_standard(x,x,41); /* tgamma domain */
else if (y == 0)
__set_errno (ERANGE); /* tgamma underflow */
diff --git a/math/w_tgamma_template.c b/math/w_tgamma_template.c
index 032f27a3f7..f570615d7b 100644
--- a/math/w_tgamma_template.c
+++ b/math/w_tgamma_template.c
@@ -41,7 +41,7 @@ M_DECL_FUNC (__tgamma) (FLOAT x)
if (x == 0)
/* Pole error: tgamma(x=0). */
__set_errno (ERANGE);
- else if (M_SUF (__floor) (x) == x && x < 0)
+ else if (M_SUF (floor) (x) == x && x < 0)
/* Domain error: tgamma(integer x<0). */
__set_errno (EDOM);
else
diff --git a/math/w_tgammaf_compat.c b/math/w_tgammaf_compat.c
index e9ffddb796..ed509885ae 100644
--- a/math/w_tgammaf_compat.c
+++ b/math/w_tgammaf_compat.c
@@ -32,7 +32,7 @@ __tgammaf(float x)
if (x == (float)0.0)
/* tgammaf pole */
return __kernel_standard_f(x, x, 150);
- else if(__floorf(x)==x&&x<0.0f)
+ else if(floorf(x)==x&&x<0.0f)
/* tgammaf domain */
return __kernel_standard_f(x, x, 141);
else if (y == 0)
diff --git a/math/w_tgammal_compat.c b/math/w_tgammal_compat.c
index 3695b7fbb3..d79c788dfc 100644
--- a/math/w_tgammal_compat.c
+++ b/math/w_tgammal_compat.c
@@ -36,7 +36,7 @@ __tgammal(long double x)
&& _LIB_VERSION != _IEEE_) {
if(x==0.0)
return __kernel_standard_l(x,x,250); /* tgamma pole */
- else if(__floorl(x)==x&&x<0.0L)
+ else if(floorl(x)==x&&x<0.0L)
return __kernel_standard_l(x,x,241); /* tgamma domain */
else if (y == 0)
__set_errno (ERANGE); /* tgamma underflow */