-rw-r--r--  ChangeLog                         17
-rw-r--r--  sysdeps/ia64/fpu/libm-test-ulps   12
-rw-r--r--  sysdeps/powerpc/bits/atomic.h     98
3 files changed, 99 insertions(+), 28 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 460a86f2d1..7d4aa835e7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,20 @@
+2003-03-27 Steven Munroe <sjmunroe@us.ibm.com>
+
+ * sysdeps/powerpc/bits/atomic.h
+ (__arch_compare_and_exchange_bool_32_acq): Move to [!__powerpc64__].
+ [__powerpc64__] (__arch_compare_and_exchange_bool_32_acq):
+ Define PPC64 specific version.
+ [__powerpc64__] (__arch_compare_and_exchange_bool_64_acq):
+ Change (mem) constraint to "b".
+ [__powerpc64__] (__arch_atomic_exchange_and_add_64):
+ Replace addi with add. Change (value) constraint to "r".
+ Change (mem) constraint to "b".
+ [__powerpc64__] (__arch_atomic_decrement_if_positive_64): New macro.
+ (__arch_atomic_exchange_32): Change (mem) constraint to "b".
+ (__arch_atomic_exchange_and_add_32): Change (mem) constraint to "b".
+ (__arch_atomic_decrement_if_positive_32): New macro.
+ (atomic_decrement_if_positive): Use __arch* macros.
+
2003-03-27 Jakub Jelinek <jakub@redhat.com>
* sysdeps/ia64/fpu/libm-test-ulps: Update.
diff --git a/sysdeps/ia64/fpu/libm-test-ulps b/sysdeps/ia64/fpu/libm-test-ulps
index 6b5d6c5ef6..2f67d213b4 100644
--- a/sysdeps/ia64/fpu/libm-test-ulps
+++ b/sysdeps/ia64/fpu/libm-test-ulps
@@ -333,11 +333,13 @@ ifloat: 1
Test "Real part of: ctan (-2 - 3 i) == 0.376402564150424829275122113032269084e-2 - 1.00323862735360980144635859782192726 i":
double: 1
idouble: 1
-ildouble: 436
-ldouble: 436
+ildouble: 2
+ldouble: 2
Test "Imaginary part of: ctan (-2 - 3 i) == 0.376402564150424829275122113032269084e-2 - 1.00323862735360980144635859782192726 i":
float: 1
ifloat: 1
+ildouble: 1
+ldouble: 1
Test "Real part of: ctan (0.75 + 1.25 i) == 0.160807785916206426725166058173438663 + 0.975363285031235646193581759755216379 i":
ildouble: 1
ldouble: 1
@@ -968,14 +970,16 @@ ifloat: 1
Function: Real part of "ctan":
double: 1
idouble: 1
-ildouble: 436
-ldouble: 436
+ildouble: 2
+ldouble: 2
Function: Imaginary part of "ctan":
double: 1
float: 1
idouble: 1
ifloat: 1
+ildouble: 1
+ldouble: 1
Function: Real part of "ctanh":
double: 1
diff --git a/sysdeps/powerpc/bits/atomic.h b/sysdeps/powerpc/bits/atomic.h
index bde0ded10b..7028d73135 100644
--- a/sysdeps/powerpc/bits/atomic.h
+++ b/sysdeps/powerpc/bits/atomic.h
@@ -65,27 +65,34 @@ typedef uintmax_t uatomic_max_t;
* Ultimately we should do separate _acq and _rel versions.
*/
+#ifdef __powerpc64__
+
/*
- * XXX this may not work properly on 64-bit if the register
- * containing oldval has the high half non-zero for some reason.
+ * The 32-bit exchange_bool is different on powerpc64 because subf
+ * does signed 64-bit arithmetic while lwarx loads a 32-bit unsigned
+ * value (the "load word and zero" form clears the high 32 bits).
+ * On powerpc64 register values are 64 bits wide by default, including
+ * oldval, so we must sign-extend the word loaded by lwarx to 64 bits
+ * before the 64-bit subtract, so that it gives the expected result
+ * and sets the condition register correctly.
*/
-#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({ \
unsigned int __tmp; \
__asm __volatile (__ARCH_REL_INSTR "\n" \
"1: lwarx %0,0,%1\n" \
+ " extsw %0,%0\n" \
" subf. %0,%2,%0\n" \
" bne 2f\n" \
" stwcx. %3,0,%1\n" \
" bne- 1b\n" \
"2: " __ARCH_ACQ_INSTR \
: "=&r" (__tmp) \
- : "r" (mem), "r" (oldval), "r" (newval) \
+ : "b" (mem), "r" (oldval), "r" (newval) \
: "cr0", "memory"); \
__tmp != 0; \
})
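A stand-alone sketch of the problem the new extsw fixes (not part of the patch; the variable names are illustrative): lwarx zero-extends the loaded word, while a 64-bit register holding oldval is normally sign-extended, so a plain 64-bit subtract can report inequality for equal 32-bit values whose high bit is set.

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t loaded = 0x80000000u;        /* what lwarx leaves in the register:
                                           zero-extended to 64 bits            */
  int64_t oldval = (int32_t) loaded;    /* the same 32-bit value as a normal
                                           sign-extended 64-bit register value */

  printf ("equal as 64-bit values?   %d\n", (int64_t) loaded == oldval);           /* 0 */
  /* extsw sign-extends the loaded word, after which the compare succeeds.  */
  printf ("equal after sign-extend?  %d\n", (int64_t) (int32_t) loaded == oldval); /* 1 */
  return 0;
}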
-#ifdef __powerpc64__
# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
({ \
unsigned long __tmp; \
@@ -97,7 +104,7 @@ typedef uintmax_t uatomic_max_t;
" bne- 1b\n" \
"2: " __ARCH_ACQ_INSTR \
: "=&r" (__tmp) \
- : "r" (mem), "r" (oldval), "r" (newval) \
+ : "b" (mem), "r" (oldval), "r" (newval) \
: "cr0", "memory"); \
__tmp != 0; \
})
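The recurring change of the mem operand from "r" to "b" uses GCC's PowerPC "b" (base register) constraint, which excludes r0. Below is a stand-alone illustration (not taken from the patch) of why r0 must be avoided when a register is used as an address base: in the base (RA) position of a storage access, r0 is read as the constant 0 rather than the register's contents.

/* Illustration only.  With a plain "r" constraint the compiler may pick
   r0 for PTR; in the 0(%1) address below, r0 in the base position would
   mean "offset 0 from address 0", not the pointer's value.  The "b"
   constraint restricts the operand to r1-r31.  */
static inline int
load_word (const int *ptr)
{
  int val;
  __asm__ __volatile__ ("lwz %0,0(%1)" : "=r" (val) : "b" (ptr));
  return val;
}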
@@ -110,7 +117,7 @@ typedef uintmax_t uatomic_max_t;
" stdcx. %3,0,%2\n" \
" bne- 1b" \
: "=&r" (__val), "=m" (*mem) \
- : "r" (mem), "r" (value), "1" (*mem) \
+ : "b" (mem), "r" (value), "1" (*mem) \
: "cr0"); \
__val; \
})
@@ -119,16 +126,47 @@ typedef uintmax_t uatomic_max_t;
({ \
__typeof (*mem) __val, __tmp; \
__asm __volatile ("1: ldarx %0,0,%3\n" \
- " addi %1,%0,%4\n" \
+ " add %1,%0,%4\n" \
" stdcx. %1,0,%3\n" \
" bne- 1b" \
: "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
- : "r" (mem), "I" (value), "2" (*mem) \
+ : "b" (mem), "r" (value), "2" (*mem) \
: "cr0"); \
__val; \
})
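Besides the "b" change for mem, the 64-bit exchange-and-add now uses add with an "r" operand instead of addi with "I". The "I" constraint matches only a signed 16-bit compile-time constant, so the old form could not accept a run-time or out-of-range addend at all. A stand-alone sketch of the difference (illustrative function names, not part of the patch):

/* "I" accepts only a constant in [-32768, 32767]; calling this with a
   variable or larger addend fails to compile.  */
static inline long
add_small_constant (long a, long value)
{
  long out;
  __asm__ ("addi %0,%1,%2" : "=r" (out) : "r" (a), "I" (value));
  return out;
}

/* With an "r" operand and the add instruction, any 64-bit addend works.  */
static inline long
add_any (long a, long value)
{
  long out;
  __asm__ ("add %0,%1,%2" : "=r" (out) : "r" (a), "r" (value));
  return out;
}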
+
+# define __arch_atomic_decrement_if_positive_64(mem) \
+ ({ long __val, __tmp; \
+ __asm __volatile ("1: ldarx %0,0,%3\n" \
+ " cmpdi 0,%0,0\n" \
+ " addi %1,%0,-1\n" \
+ " ble 2f\n" \
+ " stdcx. %1,0,%3\n" \
+ " bne- 1b\n" \
+ "2: " __ARCH_ACQ_INSTR \
+ : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
+ : "b" (mem), "2" (*mem) \
+ : "cr0"); \
+ __val; \
+ })
#else /* powerpc32 */
+# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
+({ \
+ unsigned int __tmp; \
+ __asm __volatile (__ARCH_REL_INSTR "\n" \
+ "1: lwarx %0,0,%1\n" \
+ " subf. %0,%2,%0\n" \
+ " bne 2f\n" \
+ " stwcx. %3,0,%1\n" \
+ " bne- 1b\n" \
+ "2: " __ARCH_ACQ_INSTR \
+ : "=&r" (__tmp) \
+ : "b" (mem), "r" (oldval), "r" (newval) \
+ : "cr0", "memory"); \
+ __tmp != 0; \
+})
+
# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
(abort (), 0)
@@ -136,6 +174,8 @@ typedef uintmax_t uatomic_max_t;
({ abort (); (*mem) = (value); })
# define __arch_atomic_exchange_and_add_64(mem, value) \
({ abort (); (*mem) = (value); })
+# define __arch_atomic_decrement_if_positive_64(mem) \
+ ({ abort (); (*mem)--; })
#endif
#define __arch_atomic_exchange_32(mem, value) \
@@ -146,7 +186,7 @@ typedef uintmax_t uatomic_max_t;
" stwcx. %3,0,%2\n" \
" bne- 1b" \
: "=&r" (__val), "=m" (*mem) \
- : "r" (mem), "r" (value), "1" (*mem) \
+ : "b" (mem), "r" (value), "1" (*mem) \
: "cr0"); \
__val; \
})
@@ -159,10 +199,26 @@ typedef uintmax_t uatomic_max_t;
" stwcx. %1,0,%3\n" \
" bne- 1b" \
: "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
- : "r" (mem), "r" (value), "2" (*mem) \
+ : "b" (mem), "r" (value), "2" (*mem) \
: "cr0"); \
__val; \
})
+
+#define __arch_atomic_decrement_if_positive_32(mem) \
+ ({ int __val, __tmp; \
+ __asm __volatile ("1: lwarx %0,0,%3\n" \
+ " cmpwi 0,%0,0\n" \
+ " addi %1,%0,-1\n" \
+ " ble 2f\n" \
+ " stwcx. %1,0,%3\n" \
+ " bne- 1b\n" \
+ "2: " __ARCH_ACQ_INSTR \
+ : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
+ : "b" (mem), "2" (*mem) \
+ : "cr0"); \
+ __val; \
+ })
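For reference, the semantics of the two decrement-if-positive loops can be written in portable GCC C with a compare-and-swap builtin (a stand-alone sketch, not how this header implements it):

/* Return the old value of *MEM; decrement it only if it was > 0.  */
static int
decrement_if_positive (int *mem)
{
  int old = *mem;
  /* Retry while the value is still positive but another thread changed
     *MEM between the load and the compare-and-swap.  */
  while (old > 0 && !__sync_bool_compare_and_swap (mem, old, old - 1))
    old = *mem;
  return old;
}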
+
#define atomic_exchange(mem, value) \
({ \
@@ -191,20 +247,14 @@ typedef uintmax_t uatomic_max_t;
/* Decrement *MEM if it is > 0, and return the old value. */
#define atomic_decrement_if_positive(mem) \
- ({ if (sizeof (*mem) != 4) \
+ ({ __typeof (*(mem)) __result; \
+ if (sizeof (*mem) == 4) \
+ __result = __arch_atomic_decrement_if_positive_32 (mem); \
+ else if (sizeof (*mem) == 8) \
+ __result = __arch_atomic_decrement_if_positive_64 (mem); \
+ else \
abort (); \
- int __val, __tmp; \
- __asm __volatile ("1: lwarx %0,0,%3\n" \
- " cmpwi 0,%0,0\n" \
- " addi %1,%0,-1\n" \
- " ble 2f\n" \
- " stwcx. %1,0,%3\n" \
- " bne- 1b\n" \
- "2: " __ARCH_ACQ_INSTR \
- : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
- : "r" (mem), "2" (*mem) \
- : "cr0"); \
- __val; \
+ __result; \
})
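As a usage note (illustrative only): because atomic_decrement_if_positive returns the old value and decrements only when that value was greater than zero, it maps directly onto a non-blocking semaphore wait, e.g.:

/* Hypothetical helper built on the macro defined above.  */
static int
try_wait (volatile int *sem_count)
{
  /* Succeeds (returns 0) only if the count was > 0 and was decremented.  */
  return atomic_decrement_if_positive (sem_count) > 0 ? 0 : -1;
}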