author     Torvald Riegel <triegel@redhat.com>    2016-06-14 15:12:00 +0200
committer  Torvald Riegel <triegel@redhat.com>    2016-06-24 23:04:40 +0300
commit     76a0b73e8102c5bfb5cb791e34992472f5d1d33e (patch)
tree       2fcd8ece66b944eed3ca046d79651c7a7573736f /nptl
parent     40244be3729149ff440caf18e445ec17b0d0b511 (diff)
Remove atomic_compare_and_exchange_bool_rel.
atomic_compare_and_exchange_bool_rel and
catomic_compare_and_exchange_bool_rel are removed and replaced with the
new C11-like atomic_compare_exchange_weak_release.  The concurrent code
in nscd/cache.c has not been reviewed yet, so this patch does not add
detailed comments.

        * nscd/cache.c (cache_add): Use new C11-like atomic operation
        instead of atomic_compare_and_exchange_bool_rel.
        * nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full):
        Likewise.
        * include/atomic.h (atomic_compare_and_exchange_bool_rel,
        catomic_compare_and_exchange_bool_rel): Remove.
        * sysdeps/aarch64/atomic-machine.h
        (atomic_compare_and_exchange_bool_rel): Likewise.
        * sysdeps/alpha/atomic-machine.h
        (atomic_compare_and_exchange_bool_rel): Likewise.
        * sysdeps/arm/atomic-machine.h
        (atomic_compare_and_exchange_bool_rel): Likewise.
        * sysdeps/mips/atomic-machine.h
        (atomic_compare_and_exchange_bool_rel): Likewise.
        * sysdeps/tile/atomic-machine.h
        (atomic_compare_and_exchange_bool_rel): Likewise.
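
Editorial note: the following is a minimal standalone sketch in ISO C11
(<stdatomic.h>, not glibc code; the names 'lock', 'my_tid' and the use of
main are invented for illustration) of the semantic difference described
above.  The removed macro was a strong release CAS that returned nonzero
on failure; the C11-like replacement returns true on success, writes the
observed value back into its expected argument on failure, and may fail
spuriously, so it is used inside a retry loop.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int lock;

int
main (void)
{
  int my_tid = 42;
  atomic_store_explicit (&lock, my_tid, memory_order_relaxed);

  /* Old style, roughly:
       if (atomic_compare_and_exchange_bool_rel (&lock, 0, my_tid)) ...
     returned nonzero when the exchange did NOT happen.
     New style, shown below: load the current value, then retry a weak
     release CAS until it succeeds; on each failure 'expected' is
     refreshed with the value actually observed in 'lock'.  */
  int expected = atomic_load_explicit (&lock, memory_order_relaxed);
  while (!atomic_compare_exchange_weak_explicit (&lock, &expected, 0,
                                                 memory_order_release,
                                                 memory_order_relaxed))
    continue;

  printf ("lock is now %d\n",
          atomic_load_explicit (&lock, memory_order_relaxed));
  return 0;
}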
Diffstat (limited to 'nptl')
-rw-r--r--   nptl/pthread_mutex_unlock.c   34
1 file changed, 22 insertions, 12 deletions
diff --git a/nptl/pthread_mutex_unlock.c b/nptl/pthread_mutex_unlock.c
index 334ce38342..82aaa95d7b 100644
--- a/nptl/pthread_mutex_unlock.c
+++ b/nptl/pthread_mutex_unlock.c
@@ -237,15 +237,24 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
       int private = (robust
                      ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                      : PTHREAD_MUTEX_PSHARED (mutex));
-      if ((mutex->__data.__lock & FUTEX_WAITERS) != 0
-          || atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock, 0,
-                                                   THREAD_GETMEM (THREAD_SELF,
-                                                                  tid)))
+      /* Unlock the mutex using a CAS unless there are futex waiters or our
+         TID is not the value of __lock anymore, in which case we let the
+         kernel take care of the situation.  Use release MO in the CAS to
+         synchronize with acquire MO in lock acquisitions.  */
+      int l = atomic_load_relaxed (&mutex->__data.__lock);
+      do
         {
-          INTERNAL_SYSCALL_DECL (__err);
-          INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
-                            __lll_private_flag (FUTEX_UNLOCK_PI, private));
+          if (((l & FUTEX_WAITERS) != 0)
+              || (l != THREAD_GETMEM (THREAD_SELF, tid)))
+            {
+              INTERNAL_SYSCALL_DECL (__err);
+              INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
+                                __lll_private_flag (FUTEX_UNLOCK_PI, private));
+              break;
+            }
         }
+      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
+                                                    &l, 0));
 
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
       break;
@@ -278,15 +287,16 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
       /* One less user. */
       --mutex->__data.__nusers;
 
-      /* Unlock. */
-      int newval, oldval;
+      /* Unlock.  Use release MO in the CAS to synchronize with acquire MO in
+         lock acquisitions.  */
+      int newval;
+      int oldval = atomic_load_relaxed (&mutex->__data.__lock);
       do
         {
-          oldval = mutex->__data.__lock;
           newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
         }
-      while (atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock,
-                                                   newval, oldval));
+      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
+                                                    &oldval, newval));
 
       if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
         lll_futex_wake (&mutex->__data.__lock, 1,
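
Editorial note: as a reading aid for the first hunk above, here is a
compressed standalone sketch in ISO C11 of the control flow the patch
introduces for the PI unlock path.  This is not the glibc source; the
names 'unlock_pi_sketch', 'kernel_unlock_pi' and 'my_tid' are invented,
and the FUTEX_UNLOCK_PI syscall is reduced to a stub.

#include <stdatomic.h>

#define FUTEX_WAITERS 0x80000000  /* Waiters bit from the futex ABI.  */

/* Stand-in for the FUTEX_UNLOCK_PI syscall issued by the real code.  */
static void
kernel_unlock_pi (atomic_int *lock)
{
  atomic_store_explicit (lock, 0, memory_order_release);
}

/* Try to release a PI mutex with a release-MO CAS; hand the job to the
   kernel when waiters are present or we no longer appear to own it.  */
void
unlock_pi_sketch (atomic_int *lock, int my_tid)
{
  int l = atomic_load_explicit (lock, memory_order_relaxed);
  do
    {
      if ((l & FUTEX_WAITERS) != 0 || l != my_tid)
        {
          kernel_unlock_pi (lock);  /* Slow path.  */
          return;
        }
      /* Fast path: the release CAS below pairs with the acquire MO used
         when the lock is taken.  If it fails (including spuriously, since
         it is the weak variant), 'l' now holds the observed value and the
         checks above run again.  */
    }
  while (!atomic_compare_exchange_weak_explicit (lock, &l, 0,
                                                 memory_order_release,
                                                 memory_order_relaxed));
}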