-rw-r--r--  ChangeLog                                  |   7
-rw-r--r--  math/libm-test.inc                         |   7
-rw-r--r--  soft-fp/double.h                           |   1
-rw-r--r--  soft-fp/extended.h                         |   1
-rw-r--r--  soft-fp/quad.h                             |   1
-rw-r--r--  sysdeps/powerpc/fpu/libm-test-ulps         |  32
-rw-r--r--  sysdeps/powerpc/powerpc64/fpu/s_ceill.S    |   2
-rw-r--r--  sysdeps/powerpc/powerpc64/fpu/s_llroundl.S | 116
8 files changed, 86 insertions, 81 deletions
diff --git a/ChangeLog b/ChangeLog
index bfcdc51a75..1b7b074b5e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2006-01-29 Joseph S. Myers <joseph@codesourcery.com>
+
+ * soft-fp/double.h (FP_CMP_UNORD_D): Also define in
+ [_FP_W_TYPE_SIZE >= 64] case.
+ * soft-fp/extended.h (FP_CMP_UNORD_E): Likewise.
+ * soft-fp/quad.h (FP_CMP_UNORD_Q): Likewise.
+
2006-01-27 Dwayne Grant McConnell <decimal@us.ibm.com>
Jakub Jelinek <jakub@redhat.com>
Roland McGrath <roland@redhat.com>
diff --git a/math/libm-test.inc b/math/libm-test.inc
index 3d72bbfb29..9534596725 100644
--- a/math/libm-test.inc
+++ b/math/libm-test.inc
@@ -3566,6 +3566,13 @@ llround_test (void)
TEST_f_L (llround, -72057594037927936.5L, -72057594037927937LL);
TEST_f_L (llround, -72057594037927936.75L, -72057594037927937LL);
TEST_f_L (llround, -72057594037927937.5L, -72057594037927938LL);
+
+ TEST_f_L (llround, 9223372036854775806.25L, 9223372036854775806LL);
+ TEST_f_L (llround, -9223372036854775806.25L, -9223372036854775806LL);
+ TEST_f_L (llround, 9223372036854775806.5L, 9223372036854775807LL);
+ TEST_f_L (llround, -9223372036854775806.5L, -9223372036854775807LL);
+ TEST_f_L (llround, 9223372036854775807.0L, 9223372036854775807LL);
+ TEST_f_L (llround, -9223372036854775807.0L, -9223372036854775807LL);
#endif
END (llround);
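
[The new cases sit right at the top of the long long range: the half-way values must round away from zero to exactly LLONG_MAX without overflowing. As a quick illustration of the expected behaviour, a hypothetical standalone program (not part of the patch; the constants are only exact where long double is the 106-bit IBM double-double format, as on powerpc64) might look like:

#include <math.h>
#include <stdio.h>

int
main (void)
{
  /* 0.25 cases round to the nearest integer; half-way cases round
     away from zero, landing exactly on LLONG_MAX.  */
  printf ("%lld\n", llroundl (9223372036854775806.25L));  /* 9223372036854775806 */
  printf ("%lld\n", llroundl (9223372036854775806.5L));   /* 9223372036854775807 */
  printf ("%lld\n", llroundl (-9223372036854775806.5L));  /* -9223372036854775807 */
  printf ("%lld\n", llroundl (9223372036854775807.0L));   /* 9223372036854775807 */
  return 0;
}

Handling these values without spurious overflow is what the rewritten s_llroundl.S below provides.]
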
diff --git a/soft-fp/double.h b/soft-fp/double.h
index 77d439b466..4d66c8d457 100644
--- a/soft-fp/double.h
+++ b/soft-fp/double.h
@@ -188,6 +188,7 @@ union _FP_UNION_D
#define FP_CMP_D(r,X,Y,un) _FP_CMP(D,1,r,X,Y,un)
#define FP_CMP_EQ_D(r,X,Y) _FP_CMP_EQ(D,1,r,X,Y)
+#define FP_CMP_UNORD_D(r,X,Y) _FP_CMP_UNORD(D,1,r,X,Y)
#define FP_TO_INT_D(r,X,rsz,rsg) _FP_TO_INT(D,1,r,X,rsz,rsg)
#define FP_FROM_INT_D(X,r,rs,rt) _FP_FROM_INT(D,1,X,r,rs,rt)
diff --git a/soft-fp/extended.h b/soft-fp/extended.h
index a1f54ccceb..d34df5d0ce 100644
--- a/soft-fp/extended.h
+++ b/soft-fp/extended.h
@@ -379,6 +379,7 @@ union _FP_UNION_E
#define FP_CMP_E(r,X,Y,un) _FP_CMP(E,2,r,X,Y,un)
#define FP_CMP_EQ_E(r,X,Y) _FP_CMP_EQ(E,2,r,X,Y)
+#define FP_CMP_UNORD_E(r,X,Y) _FP_CMP_UNORD(E,2,r,X,Y)
#define FP_TO_INT_E(r,X,rsz,rsg) _FP_TO_INT(E,2,r,X,rsz,rsg)
#define FP_FROM_INT_E(X,r,rs,rt) _FP_FROM_INT(E,2,X,r,rs,rt)
diff --git a/soft-fp/quad.h b/soft-fp/quad.h
index b03063bc05..c1dccc49a1 100644
--- a/soft-fp/quad.h
+++ b/soft-fp/quad.h
@@ -195,6 +195,7 @@ union _FP_UNION_Q
#define FP_CMP_Q(r,X,Y,un) _FP_CMP(Q,2,r,X,Y,un)
#define FP_CMP_EQ_Q(r,X,Y) _FP_CMP_EQ(Q,2,r,X,Y)
+#define FP_CMP_UNORD_Q(r,X,Y) _FP_CMP_UNORD(Q,2,r,X,Y)
#define FP_TO_INT_Q(r,X,rsz,rsg) _FP_TO_INT(Q,2,r,X,rsz,rsg)
#define FP_FROM_INT_Q(X,r,rs,rt) _FP_FROM_INT(Q,2,X,r,rs,rt)
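
[The three hunks above define FP_CMP_UNORD_D/E/Q in the [_FP_W_TYPE_SIZE >= 64] case as well, mirroring the existing FP_CMP_*/FP_CMP_EQ_* wrappers around _FP_CMP_UNORD. As a sketch of how a caller uses the macro, modeled on the libgcc-style soft-fp comparison routines (the function name and the DFtype operand type follow that convention and are assumptions here, not part of this patch):

#include "soft-fp.h"
#include "double.h"

/* Return nonzero iff a or b is a NaN, i.e. the operands are unordered.  */
int
__unorddf2 (DFtype a, DFtype b)
{
  FP_DECL_D (A);
  FP_DECL_D (B);
  int r;

  FP_UNPACK_RAW_D (A, a);
  FP_UNPACK_RAW_D (B, b);
  FP_CMP_UNORD_D (r, A, B);

  return r;
}

Raw unpacking suffices because the unordered test only has to recognise NaN operands; no normalisation or exception handling is involved.]
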
diff --git a/sysdeps/powerpc/fpu/libm-test-ulps b/sysdeps/powerpc/fpu/libm-test-ulps
index 46c7666250..1005421ec0 100644
--- a/sysdeps/powerpc/fpu/libm-test-ulps
+++ b/sysdeps/powerpc/fpu/libm-test-ulps
@@ -623,38 +623,6 @@ ifloat: 2
ildouble: 3
ldouble: 3
-# llround
-Test "llround (4503599627370496.5) == 4503599627370497LL":
-ildouble: -1
-ldouble: -1
-Test "llround (4503599627370497.5) == 4503599627370498LL":
-ildouble: -1
-ldouble: -1
-Test "llround (72057594037927935.5) == 72057594037927936LL":
-ildouble: -1
-ldouble: -1
-Test "llround (72057594037927936.5) == 72057594037927937LL":
-ildouble: -1
-ldouble: -1
-Test "llround (72057594037927936.75) == 72057594037927937LL":
-ildouble: -1
-ldouble: -1
-Test "llround (72057594037927937.5) == 72057594037927938LL":
-ildouble: -1
-ldouble: -1
-Test "llround (9007199254740991.5) == 9007199254740992LL":
-ildouble: -1
-ldouble: -1
-Test "llround (9007199254740992.5) == 9007199254740993LL":
-ildouble: -1
-ldouble: -1
-Test "llround (9007199254740992.75) == 9007199254740993LL":
-ildouble: -1
-ldouble: -1
-Test "llround (9007199254740993.5) == 9007199254740994LL":
-ildouble: -1
-ldouble: -1
-
# log10
Test "log10 (0.75) == -0.124938736608299953132449886193870744":
double: 1
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_ceill.S b/sysdeps/powerpc/powerpc64/fpu/s_ceill.S
index e164fec62b..a8f8a0afc5 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_ceill.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_ceill.S
@@ -51,7 +51,7 @@ ENTRY (__ceill)
fabs fp9,fp2
fsub fp12,fp13,fp13 /* generate 0.0 */
fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO52) */
- fcmpo cr6,fp1,fp12 /* if (x > 0.0) */
+ fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
bnl- cr7,.L2
mtfsfi 7,2 /* Set rounding mode toward +inf. */
fneg fp2,fp12
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_llroundl.S b/sysdeps/powerpc/powerpc64/fpu/s_llroundl.S
index b7aeb394f7..29eca11093 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_llroundl.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_llroundl.S
@@ -67,79 +67,99 @@
*/
ENTRY (__llroundl)
+ mffs fp7 /* Save current FPU rounding mode. */
fabs fp0,fp1
lfd fp13,.LC2@toc(2) /* 2**52 */
lfd fp12,.LC3@toc(2) /* 2**63 */
lfd fp11,.LC0@toc(2) /* 0.0 */
lfd fp10,.LC1@toc(2) /* 0.5 */
- fcmpu cr0,fp0,fp12 /* if (x < TWO63 */
- fcmpu cr7,fp0,fp13 /* if (x < TWO52 */
- fcmpu cr6,fp1,fp11 /* if (x > 0.0) */
- bge- cr0,.L2
- bge- cr7,.L8
- ble- cr6,.L4
- fadd fp4,fp2,fp10 /* x+= 0.5; */
- fadd fp5,fp1,fp4 /* x+= 0.5; */
-.L9:
- fctidz fp3,fp5 /* Convert To Integer DW llround toward 0. */
- stfd fp3,-16(r1)
+ fabs fp9,fp2
+ fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO52) */
+ fcmpu cr6,fp1,fp11 /* if (x > 0.0) */
+ bnl- cr7,.L2
+ mtfsfi 7,1 /* Set rounding mode toward 0. */
+ ble- cr6,.L1
+ fadd fp9,fp1,fp10 /* x+= 0.5; */
+ b .L0
+.L1:
+ fsub fp9,fp1,fp10 /* x-= 0.5; */
+.L0:
+ fctid fp0,fp9
+ stfd fp0,-16(r1)
+ mtfsf 0x01,fp7 /* restore previous rounding mode. */
nop /* Insure the following load is in a different dispatch group */
nop /* to avoid pipe stall on POWER4&5. */
nop
ld r3,-16(r1)
blr
-.L4:
- fsub fp4,fp2,fp10 /* x-= 0.5; */
- fadd fp5,fp1,fp4 /* x+= 0.5; */
- b .L9
-.L8:
- ble cr6,.L6
- fneg fp10,fp10
-.L6:
- fadd fp2,fp2,fp10
- fctidz fp3,fp1 /* Convert To Integer DW llround toward 0. */
- fctidz fp4,fp2 /* Convert To Integer DW llround toward 0. */
- stfd fp3,-16(r1)
- stfd fp4,-8(r1)
- nop /* Insure the following load is in a different dispatch group */
- nop /* to avoid pipe stall on POWER4&5. */
- nop
- ld r3,-16(r1)
- ld r0,-8(r1)
- add r3,r3,r0
- blr
-.L2:
-/* The high double is >= TWO63 so it looks like we are "out of range".
- But this may be caused by rounding of the high double and the
- negative low double may bring it back into range. So we need to
- de-round the high double and invert the low double without changing
- the effective long double value. To do this we compute a special
- value (tau) that we can subtract from the high double and add to
- the low double before conversion. The resulting integers can be
- summed to get the total value.
- tau = floor(x_high/TWO52);
+/* The high double is > TWO52 so we need to round the low double and
+ perhaps the high double. In this case we have to round the low
+ double and handle any adjustment to the high double that may be
+ caused by rounding (up). This is complicated by the fact that the
+ high double may already be rounded and the low double may have the
+ opposite sign to compensate. This gets a bit tricky so we use the
+ following algorithm:
+
+ tau = trunc(x_high/TWO52);
x0 = x_high - tau;
- x1 = x_low + tau; */
+ x1 = x_low + tau;
+ r1 = round(x1);
+ y_high = x0 + r1;
+ y_low = x0 - y_high + r1;
+ return y; */
+.L2:
+ fcmpu cr7,fp0,fp12 /* if (|x_high| > TWO63) */
+ fcmpu cr0,fp9,fp11 /* || (|x_low| == 0.0) */
+ fmr fp9,fp1
+ fcmpu cr5,fp2,fp11 /* if (x_low > 0.0) */
+ bgt- cr7,.L0 /* return llround(x); */
+ mtfsfi 7,1 /* Set rounding mode toward 0. */
fdiv fp8,fp1,fp13 /* x_high/TWO52 */
- bgt- cr0,.L9 /* if x > TWO63 */
+
+ bng- cr6,.L6 /* if (x > 0.0) */
+ fctidz fp0,fp8
+ fcfid fp8,fp0 /* tau = trunc(x_high/TWO52); */
+ bng cr5,.L4 /* if (x_low > 0.0) */
+ fmr fp3,fp1
+ fmr fp4,fp2
+ b .L5
+.L4: /* if (x_low < 0.0) */
+ fsub fp3,fp1,fp8 /* x0 = x_high - tau; */
+ fadd fp4,fp2,fp8 /* x1 = x_low + tau; */
+.L5:
+ fadd fp5,fp4,fp10 /* r1 = x1 + 0.5; */
+ b .L9
+.L6: /* if (x < 0.0) */
fctidz fp0,fp8
- fcfid fp8,fp0 /* tau = floor(x_high/TWO52); */
+ fcfid fp8,fp0 /* tau = trunc(x_high/TWO52); */
+ bnl cr5,.L7 /* if (x_low < 0.0) */
+ fmr fp3,fp1
+ fmr fp4,fp2
+ b .L8
+.L7: /* if (x_low > 0.0) */
fsub fp3,fp1,fp8 /* x0 = x_high - tau; */
fadd fp4,fp2,fp8 /* x1 = x_low + tau; */
- fctid fp11,fp3
- fctid fp12,fp4
+.L8:
+ fsub fp5,fp4,fp10 /* r1 = x1 - 0.5; */
+.L9:
+ fctid. fp11,fp3
+ fctid fp12,fp5
stfd fp11,-16(r1)
stfd fp12,-8(r1)
+ mtfsf 0x01,fp7 /* restore previous rounding mode. */
nop /* Insure the following load is in a different dispatch group */
nop /* to avoid pipe stall on POWER4&5. */
nop
ld r3,-16(r1)
+ bunlr cr1 /* if not overflow, return. */
ld r0,-8(r1)
addo. r3,r3,r0
- bnslr+ cr0 /* if the sum does not overflow, return. */
- b .L9 /* Otherwise we want to set "invalid operation". */
+ bnslr cr0
+ fmr fp9,fp12
+ bng cr6,.L0
+ fneg fp9,fp12
+ b .L0
END (__llroundl)
strong_alias (__llroundl, __lroundl)
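
[The tau-based scheme described in the comment block added above can be modelled in scalar C on the explicit (hi, lo) halves of an IBM long double. This is a simplified sketch, not a drop-in replacement for the assembly: it skips the |x_high| > 2^63 early exit and the FPSCR save/restore, and the helper name, the explicit pair representation and the use of trunc() for the directed conversions are assumptions for illustration only.

#include <math.h>

#define TWO52 0x1p52

/* Round hi + lo (the two halves of an IBM long double, |hi| > 2^52)
   to the nearest long long, halfway cases away from zero.  */
static long long
llround_big (double hi, double lo)
{
  double half = hi > 0.0 ? 0.5 : -0.5;
  double x0 = hi, x1 = lo;

  /* hi alone is already an integer; if lo is zero or has the opposite
     sign, hi may have been rounded past the true value, so shift the
     small integer tau from hi to lo before converting.  This keeps the
     sum hi + lo unchanged while pulling hi safely below 2^63.  */
  if (hi > 0.0 ? lo <= 0.0 : lo >= 0.0)
    {
      double tau = trunc (hi / TWO52);   /* tau = trunc(x_high/TWO52); */
      x0 = hi - tau;                     /* x0 = x_high - tau;         */
      x1 = lo + tau;                     /* x1 = x_low + tau;          */
    }

  /* r1 = round(x1) half away from zero, then sum the integer parts.  */
  return (long long) x0 + (long long) trunc (x1 + half);
}

The assembly performs the same two conversions with fctid under a round-toward-zero FPSCR setting and adds the resulting doublewords in r3.]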