Diffstat (limited to 'sysdeps/unix/sysv')
-rw-r--r--  sysdeps/unix/sysv/linux/i386/lowlevellock.h         2
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/elision-lock.c     22
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/elision-trylock.c  12
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/elision-unlock.c   12
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/htm.h              12
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/lowlevellock.h      6
-rw-r--r--  sysdeps/unix/sysv/linux/s390/lowlevellock.h         2
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/lowlevellock.h       2

8 files changed, 29 insertions, 41 deletions
diff --git a/sysdeps/unix/sysv/linux/i386/lowlevellock.h b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
index 58f5638e37..b8ccd31c0f 100644
--- a/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
@@ -317,7 +317,7 @@ extern int __lll_trylock_elision(int *lock, short *adapt_count)
#define lll_lock_elision(futex, adapt_count, private) \
__lll_lock_elision (&(futex), &(adapt_count), private)
-#define lll_unlock_elision(futex, private) \
+#define lll_unlock_elision(futex, adapt_count, private) \
__lll_unlock_elision (&(futex), private)
#define lll_trylock_elision(futex, adapt_count) \
__lll_trylock_elision(&(futex), &(adapt_count))
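
To make the interface change concrete: on i386 the macro now accepts an adapt_count argument so that every architecture shares one calling convention, but it does not forward it, since i386's __lll_unlock_elision still takes only the lock and the private flag. The s390 and x86_64 hunks below make the same change; only powerpc (further down) actually consumes the new argument. A hypothetical call site, with illustrative mutex field names:

    /* Illustrative only; 'm' stands in for a pthread mutex object.  */
    lll_unlock_elision (m->__data.__lock, m->__data.__elision, private);
    /* ...which on i386 still expands to the two-argument function:  */
    __lll_unlock_elision (&m->__data.__lock, private);
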
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
index e11ad1dc21..2a0e5407dd 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
@@ -23,27 +23,6 @@
#include <elision-conf.h>
#include "htm.h"
-/* PowerISA 2.0.7 Section B.5.5 defines isync to be insufficient as a
- barrier in acquire mechanism for HTM operations, a strong 'sync' is
- required. */
-#undef __arch_compare_and_exchange_val_32_acq
-#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
- ({ \
- __typeof (*(mem)) __tmp; \
- __typeof (mem) __memp = (mem); \
- __asm __volatile ( \
- "1: lwarx %0,0,%1" MUTEX_HINT_ACQ "\n" \
- " cmpw %0,%2\n" \
- " bne 2f\n" \
- " stwcx. %3,0,%1\n" \
- " bne- 1b\n" \
- "2: sync" \
- : "=&r" (__tmp) \
- : "b" (__memp), "r" (oldval), "r" (newval) \
- : "cr0", "memory"); \
- __tmp; \
- })
-
#if !defined(LLL_LOCK) && !defined(EXTRAARG)
/* Make sure the configuration code is always linked in for static
libraries. */
@@ -68,7 +47,6 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
{
if (*adapt_count > 0)
{
- (*adapt_count)--;
goto use_lock;
}
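
With the decrement gone from the lock path, adapt_count is only read when deciding whether to elide; it is drained in __lll_unlock_elision instead (see the unlock hunk below). A condensed sketch of the resulting control flow, assuming the parts of elision-lock.c this hunk does not show and GCC's powerpc HTM builtins; this is not the verbatim glibc body:

    int
    __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
    {
      /* Read-only check: no store to the shared cache line here.  */
      if (*adapt_count > 0)
        goto use_lock;

      for (int i = aconf.try_tbegin; i > 0; i--)
        {
          if (__libc_tbegin (0))
            {
              if (*lock == 0)
                return 0;        /* Lock is free: run transactionally.  */
              /* Busy lock: roll back.  _ABORT_LOCK_BUSY is even, i.e.
                 non-persistent, so the failure path may retry.  */
              __libc_tabort (_ABORT_LOCK_BUSY);
            }
          else if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
            {
              /* A persistent failure means a retry cannot succeed;
                 disable elision for the next few acquisitions.  */
              if (aconf.skip_lock_internal_abort > 0)
                *adapt_count = aconf.skip_lock_internal_abort;
              break;
            }
        }

    use_lock:
      return LLL_LOCK ((*lock), pshared);
    }
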
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
index edec155058..b391116b64 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
@@ -36,7 +36,6 @@ __lll_trylock_elision (int *futex, short *adapt_count)
/* Only try a transaction if it's worth it. */
if (*adapt_count > 0)
{
- (*adapt_count)--;
goto use_lock;
}
@@ -45,8 +44,12 @@ __lll_trylock_elision (int *futex, short *adapt_count)
if (*futex == 0)
return 0;
- /* Lock was busy. Fall back to normal locking. */
- __libc_tabort (_ABORT_LOCK_BUSY);
+ /* Lock was busy. This is never a nested transaction.
+ End it, and set the adapt count. */
+ __libc_tend (0);
+
+ if (aconf.skip_lock_busy > 0)
+ *adapt_count = aconf.skip_lock_busy;
}
else
{
@@ -58,9 +61,6 @@ __lll_trylock_elision (int *futex, short *adapt_count)
if (aconf.skip_trylock_internal_abort > 0)
*adapt_count = aconf.skip_trylock_internal_abort;
}
-
- if (aconf.skip_lock_busy > 0)
- *adapt_count = aconf.skip_lock_busy;
}
use_lock:
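
The substantive change in this hunk: a busy lock observed inside the trylock transaction is now handled by committing the transaction rather than aborting it. Nested trylocks were already rejected at function entry with a persistent _ABORT_NESTED_TRYLOCK (context above this hunk), so at this point the transaction is known to be top-level and has made no stores; __libc_tend (0) therefore ends it cheaply and execution falls through to the fallback path, with the skip_lock_busy bookkeeping moved inside the transactional branch. A condensed sketch of the resulting function, under the same assumptions as the lock sketch above:

    int
    __lll_trylock_elision (int *futex, short *adapt_count)
    {
      /* A no-op outside a transaction; inside one, this persistent
         abort rejects the nested elided trylock.  */
      __libc_tabort (_ABORT_NESTED_TRYLOCK);

      if (*adapt_count > 0)
        goto use_lock;           /* Elision temporarily disabled.  */

      if (__libc_tbegin (0))
        {
          if (*futex == 0)
            return 0;            /* Lock is free: elide it.  */
          /* Busy: commit the empty transaction instead of aborting.  */
          __libc_tend (0);
          if (aconf.skip_lock_busy > 0)
            *adapt_count = aconf.skip_lock_busy;
        }
      else if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ())
               && aconf.skip_trylock_internal_abort > 0)
        *adapt_count = aconf.skip_trylock_internal_abort;

    use_lock:
      return lll_trylock (*futex);
    }
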
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
index 7234db6e2d..4b4ae62d9f 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
@@ -21,12 +21,20 @@
#include "htm.h"
int
-__lll_unlock_elision(int *lock, int pshared)
+__lll_unlock_elision (int *lock, short *adapt_count, int pshared)
{
/* When the lock was free we're in a transaction. */
if (*lock == 0)
__libc_tend (0);
else
- lll_unlock ((*lock), pshared);
+ {
+ lll_unlock ((*lock), pshared);
+
+ /* Update the adapt count AFTER completing the critical section.
+ Doing this here prevents unneeded stalling when entering
+ a critical section. Saving about 8% runtime on P8. */
+ if (*adapt_count > 0)
+ (*adapt_count)--;
+ }
return 0;
}
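
Moving the decrement here is the performance point of the patch: the store to adapt_count no longer sits on the lock-acquisition path, where it would dirty a shared cache line just before the critical section; per the comment above, this is worth about 8% runtime on POWER8. A standalone illustration of the counter's lifecycle after the patch (hypothetical values, not glibc code): one busy-lock abort charges the counter, each non-transactional unlock drains one unit, and elision resumes once it reaches zero.

    #include <stdio.h>

    int
    main (void)
    {
      short adapt_count = 0;
      const short skip_lock_busy = 3;  /* stands in for aconf.skip_lock_busy */

      for (int i = 1; i <= 5; i++)
        {
          if (adapt_count == 0)
            {
              printf ("lock %d: elision attempted\n", i);
              if (i == 1)              /* pretend this attempt saw a busy lock */
                adapt_count = skip_lock_busy;
            }
          else
            printf ("lock %d: fallback lock, adapt_count=%d\n", i, adapt_count);

          /* Post-patch behavior: drained at unlock time, after the
             critical section, rather than on the way in.  */
          if (adapt_count > 0)
            adapt_count--;
        }
      return 0;
    }
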
diff --git a/sysdeps/unix/sysv/linux/powerpc/htm.h b/sysdeps/unix/sysv/linux/powerpc/htm.h
index 7b49817710..75c99c236b 100644
--- a/sysdeps/unix/sysv/linux/powerpc/htm.h
+++ b/sysdeps/unix/sysv/linux/powerpc/htm.h
@@ -160,10 +160,12 @@
#endif /* __ASSEMBLER__ */
-/* Definitions used for TEXASR Failure code (bits 0:6), they need to be even
- because tabort. always sets the first bit. */
-#define _ABORT_LOCK_BUSY 0x3f /* Lock already used. */
-#define _ABORT_NESTED_TRYLOCK 0x3e /* Write operation in trylock. */
-#define _ABORT_SYSCALL 0x3d /* Syscall issued. */
+/* Definitions used for TEXASR Failure code (bits 0:7). If the failure
+ should be persistent, the abort code must be odd. 0xd0 through 0xff
+ are reserved for the kernel and potential hypervisor. */
+#define _ABORT_PERSISTENT 0x01 /* An unspecified persistent abort. */
+#define _ABORT_LOCK_BUSY 0x34 /* Busy lock, not persistent. */
+#define _ABORT_NESTED_TRYLOCK (0x32 | _ABORT_PERSISTENT)
+#define _ABORT_SYSCALL (0x30 | _ABORT_PERSISTENT)
#endif
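
The new scheme widens the field to the full 8-bit abort code and encodes persistence in its least significant bit (TEXASR failure-code bit 7 in IBM numbering), so an odd code means "do not bother retrying". A minimal sketch of how a failure path can test this, assuming GCC's powerpc HTM support (-mhtm; htmintrin.h supplies texasru_t, __builtin_get_texasru and _TEXASRU_FAILURE_PERSISTENT):

    #include <htmintrin.h>

    static int
    transaction_worth_retrying (void)
    {
      texasru_t texasru = __builtin_get_texasru ();
      /* tabort. deposits the low byte of its operand in TEXASR bits 0:7.
         _ABORT_SYSCALL and _ABORT_NESTED_TRYLOCK are odd, so the
         persistent bit is set and a retry is futile; _ABORT_LOCK_BUSY
         (0x34) is even, marking a transient failure.  */
      return !_TEXASRU_FAILURE_PERSISTENT (texasru);
    }
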
diff --git a/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h b/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
index 67db1dece2..6769c253ce 100644
--- a/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
@@ -32,7 +32,7 @@ extern int __lll_timedlock_elision
extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
attribute_hidden;
-extern int __lll_unlock_elision(int *lock, int private)
+extern int __lll_unlock_elision (int *lock, short *adapt_count, int private)
attribute_hidden;
extern int __lll_trylock_elision(int *lock, short *adapt_count)
@@ -40,8 +40,8 @@ extern int __lll_trylock_elision(int *lock, short *adapt_count)
#define lll_lock_elision(futex, adapt_count, private) \
__lll_lock_elision (&(futex), &(adapt_count), private)
-#define lll_unlock_elision(futex, private) \
- __lll_unlock_elision (&(futex), private)
+#define lll_unlock_elision(futex, adapt_count, private) \
+ __lll_unlock_elision (&(futex), &(adapt_count), private)
#define lll_trylock_elision(futex, adapt_count) \
__lll_trylock_elision (&(futex), &(adapt_count))
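
On powerpc the macro now forwards the new argument by address, matching the three-parameter prototype above, in contrast to the i386/s390/x86_64 variants that accept and drop it. The same hypothetical call site as before:

    lll_unlock_elision (m->__data.__lock, m->__data.__elision, private);
    /* ...expands on powerpc to:  */
    __lll_unlock_elision (&m->__data.__lock, &m->__data.__elision, private);
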
diff --git a/sysdeps/unix/sysv/linux/s390/lowlevellock.h b/sysdeps/unix/sysv/linux/s390/lowlevellock.h
index 163a731bd3..cab5f4c7a3 100644
--- a/sysdeps/unix/sysv/linux/s390/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/s390/lowlevellock.h
@@ -41,7 +41,7 @@ extern int __lll_trylock_elision(int *futex, short *adapt_count)
# define lll_lock_elision(futex, adapt_count, private) \
__lll_lock_elision (&(futex), &(adapt_count), private)
-# define lll_unlock_elision(futex, private) \
+# define lll_unlock_elision(futex, adapt_count, private) \
__lll_unlock_elision (&(futex), private)
# define lll_trylock_elision(futex, adapt_count) \
__lll_trylock_elision(&(futex), &(adapt_count))
diff --git a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
index de525cd4c7..1fbd31e1fa 100644
--- a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
@@ -342,7 +342,7 @@ extern int __lll_trylock_elision (int *lock, short *adapt_count)
#define lll_lock_elision(futex, adapt_count, private) \
__lll_lock_elision (&(futex), &(adapt_count), private)
-#define lll_unlock_elision(futex, private) \
+#define lll_unlock_elision(futex, adapt_count, private) \
__lll_unlock_elision (&(futex), private)
#define lll_trylock_elision(futex, adapt_count) \
__lll_trylock_elision (&(futex), &(adapt_count))