author    Andi Kleen <ak@linux.intel.com>  2012-12-22 01:03:04 -0800
committer Andi Kleen <ak@linux.intel.com>  2013-07-02 08:46:55 -0700
commit    e8c659d74e011346785355eeef03b7fb6f533c61
tree      7791d2e0769dc19ff7b549be745e4ddc251c3b7a
parent    68cc29355f3334c7ad18f648ff9a6383a0916d23
Add elision to pthread_mutex_{try,timed,un}lock
Add elision paths to the basic mutex locks.

The normal path has a check for RTM and upgrades the lock to RTM when available. Trylocks cannot automatically upgrade, so they check for elision every time.

We use a 4-byte value in the mutex to store the lock elision adaptation state. This is separate from the adaptive spin state and uses a separate field.

Condition variables currently do not support elision. Recursive mutexes and condition variables may be supported at some point, but are not in the current implementation. Also, "trylock" will not automatically enable elision unless some other lock call has already been made on the lock.

This version does not use IFUNC, so every lock has one additional check for elision. Benchmarking showed the overhead to be negligible.
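For background, here is a minimal sketch of the hardware lock elision pattern the commit message describes, using the Intel RTM intrinsics from <immintrin.h> (_xbegin/_xend/_xtest/_xabort; compile with -mrtm and check CPUID for RTM at runtime). It is illustrative only: the elided_lock_t type and function names are made up, and glibc's real implementation lives behind the arch-specific lll_lock_elision/lll_unlock_elision primitives.

#include <immintrin.h>
#include <stdatomic.h>

typedef struct { atomic_int lock; } elided_lock_t;  /* hypothetical */

static void
elided_acquire (elided_lock_t *l)
{
  /* Start a transaction and read the lock word.  If the lock is free,
     the critical section runs transactionally; a real holder or a
     memory conflict aborts us back to _xbegin.  */
  unsigned int status = _xbegin ();
  if (status == _XBEGIN_STARTED)
    {
      if (atomic_load_explicit (&l->lock, memory_order_relaxed) == 0)
        return;          /* Elided: lock word stays in our read set.  */
      _xabort (0xff);    /* Lock actually held: abort to the fallback.  */
    }
  /* Fallback: take the lock for real.  */
  while (atomic_exchange_explicit (&l->lock, 1, memory_order_acquire) != 0)
    ;
}

static void
elided_release (elided_lock_t *l)
{
  /* Same idea as lll_unlock_elision below: inside a transaction the
     lock word was never written, so just commit; otherwise do a
     normal unlock.  */
  if (_xtest ())
    _xend ();
  else
    atomic_store_explicit (&l->lock, 0, memory_order_release);
}

This also suggests why the patch avoids touching the owner/users fields on the elided unlock path: an elided lock never set them in the first place, so the unlock must not reset them.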
Diffstat (limited to 'nptl/pthread_mutex_unlock.c')
-rw-r--r--  nptl/pthread_mutex_unlock.c | 21 +++++++++++++++++----
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/nptl/pthread_mutex_unlock.c b/nptl/pthread_mutex_unlock.c
index c0249f76ea..6914503626 100644
--- a/nptl/pthread_mutex_unlock.c
+++ b/nptl/pthread_mutex_unlock.c
@@ -23,6 +23,10 @@
 #include <lowlevellock.h>
 #include <stap-probe.h>
 
+#ifndef lll_unlock_elision
+#define lll_unlock_elision(a,b) ({ lll_unlock (a,b); 0; })
+#endif
+
 static int
 internal_function
 __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
@@ -34,8 +38,9 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
      pthread_mutex_t *mutex;
      int decr;
 {
-  int type = PTHREAD_MUTEX_TYPE (mutex);
-  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
+  int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
+  if (__builtin_expect (type &
+      ~(PTHREAD_MUTEX_KIND_MASK_NP|PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
     return __pthread_mutex_unlock_full (mutex, decr);
 
   if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
@@ -55,7 +60,14 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
 
       return 0;
     }
-  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
+  else if (__builtin_expect (type == PTHREAD_MUTEX_TIMED_ELISION_NP, 1))
+    {
+      /* Don't reset the owner/users fields for elision.  */
+      return lll_unlock_elision (mutex->__data.__lock,
+                                 PTHREAD_MUTEX_PSHARED (mutex));
+    }
+  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
+                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
     {
       /* Recursive mutex.  */
       if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
@@ -66,7 +78,8 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
         return 0;
       goto normal;
     }
-  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
+  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
+                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
     goto normal;
   else
     {
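The dispatch above hinges on two accessors: PTHREAD_MUTEX_TYPE_ELISION returns the kind bits together with the elision flag bits (so the elided-timed case can be matched directly), while PTHREAD_MUTEX_TYPE strips the elision bits (so the recursive and adaptive comparisons still work once a lock may carry elision flags). A hedged, self-contained sketch of that masking logic, using hypothetical flag values rather than the real constants from glibc's internal pthreadP.h:

#include <stdio.h>

/* Hypothetical stand-ins for the real glibc constants.  */
#define KIND_MASK      0x3   /* timed/recursive/errorcheck/adaptive */
#define ELISION_FLAGS  0x300 /* "elision" and "no elision" bits */
#define TIMED          0x0
#define RECURSIVE      0x1
#define ELISION_ON     0x100

static const char *
unlock_path (int type)               /* type = kind bits | elision bits */
{
  if (type & ~(KIND_MASK | ELISION_FLAGS))
    return "full slow path (robust/PI/PP mutexes)";
  if (type == (TIMED | ELISION_ON))
    return "elided unlock via lll_unlock_elision";
  if ((type & KIND_MASK) == RECURSIVE)   /* mask elision bits first */
    return "recursive unlock";
  return "normal unlock";
}

int
main (void)
{
  printf ("%s\n", unlock_path (TIMED | ELISION_ON));  /* elided */
  printf ("%s\n", unlock_path (RECURSIVE));           /* recursive */
  return 0;
}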