Diffstat (limited to 'nptl/pthread_mutex_lock.c')
-rw-r--r--	nptl/pthread_mutex_lock.c	129
1 file changed, 129 insertions(+), 0 deletions(-)
diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
index 06eef49c71..5345766883 100644
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -20,6 +20,7 @@
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
+#include <unistd.h>
#include "pthreadP.h"
#include <lowlevellock.h>
@@ -205,6 +206,134 @@ __pthread_mutex_lock (mutex)
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
break;
+ case PTHREAD_MUTEX_PI_RECURSIVE_NP:
+ case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
+ case PTHREAD_MUTEX_PI_NORMAL_NP:
+ case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
+ case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
+ case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
+ case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
+ case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+ {
+ int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+ int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+
+ if (robust)
+ /* Note: robust PI futexes are signaled by setting bit 0. */
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+ (void *) (((uintptr_t) &mutex->__data.__list.__next)
+ | 1));
+
+ oldval = mutex->__data.__lock;
+
+ /* Check whether we already hold the mutex. */
+ if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+ {
+ if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+ {
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+ return EDEADLK;
+ }
+
+ if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+ {
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+ /* Just bump the counter. */
+ if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ /* Overflow of the counter. */
+ return EAGAIN;
+
+ ++mutex->__data.__count;
+
+ return 0;
+ }
+ }
+
+ int newval = id;
+#ifdef NO_INCR
+ newval |= FUTEX_WAITERS;
+#endif
+ oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+ newval, 0);
+
+ if (oldval != 0)
+ {
+ /* The mutex is locked. The kernel will now take care of
+ everything. */
+ INTERNAL_SYSCALL_DECL (__err);
+ int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+ FUTEX_LOCK_PI, 1, 0);
+
+ if (INTERNAL_SYSCALL_ERROR_P (e, __err)
+ && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
+ || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
+ {
+ assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
+ || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
+ && kind != PTHREAD_MUTEX_RECURSIVE_NP));
+ /* ESRCH can happen only for non-robust PI mutexes where
+ the owner of the lock died. */
+ assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);
+
+ /* Delay the thread indefinitely. */
+ while (1)
+ __pause_nocancel ();
+ }
+
+ oldval = mutex->__data.__lock;
+
+ assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
+ }
+
+ if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+ {
+ atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+
+ /* We got the mutex. */
+ mutex->__data.__count = 1;
+ /* But it is inconsistent unless marked otherwise. */
+ mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+ ENQUEUE_MUTEX_PI (mutex);
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+ /* Note that we deliberately exit here. If we fall
+ through to the end of the function __nusers would be
+ incremented which is not correct because the old owner
+ has to be discounted. If we are not supposed to
+ increment __nusers we actually have to decrement it here. */
+#ifdef NO_INCR
+ --mutex->__data.__nusers;
+#endif
+
+ return EOWNERDEAD;
+ }
+
+ if (robust
+ && __builtin_expect (mutex->__data.__owner
+ == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+ {
+ /* This mutex is now not recoverable. */
+ mutex->__data.__count = 0;
+
+ INTERNAL_SYSCALL_DECL (__err);
+ INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+ FUTEX_UNLOCK_PI, 0, 0);
+
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+ return ENOTRECOVERABLE;
+ }
+
+ mutex->__data.__count = 1;
+ if (robust)
+ {
+ ENQUEUE_MUTEX_PI (mutex);
+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+ }
+ }
+ break;
+
default:
/* Correct code cannot set any other type. */
return EINVAL;
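
For context, a minimal caller-side sketch (not part of the patch) of the path this hunk implements: a robust, priority-inheritance mutex whose previous owner died, so pthread_mutex_lock() returns EOWNERDEAD and the new owner must mark the mutex consistent before unlocking, otherwise later lockers see ENOTRECOVERABLE. The sketch uses the current POSIX names (pthread_mutexattr_setrobust, pthread_mutex_consistent); glibc of the era of this patch exposed the equivalent _np variants.

/* Sketch only: assumes a glibc with PTHREAD_PRIO_INHERIT and robust-mutex
   support; error checking of the init calls is omitted for brevity.  */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m;

static void
init_pi_robust_mutex (void)
{
  pthread_mutexattr_t attr;
  pthread_mutexattr_init (&attr);
  /* Request the PI protocol: contended lock/unlock goes through the
     kernel's FUTEX_LOCK_PI/FUTEX_UNLOCK_PI operations, as in the hunk
     above.  */
  pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_INHERIT);
  /* Robust: a dead owner is reported via EOWNERDEAD instead of the
     lock being silently lost.  */
  pthread_mutexattr_setrobust (&attr, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init (&m, &attr);
  pthread_mutexattr_destroy (&attr);
}

static int
lock_with_recovery (void)
{
  int err = pthread_mutex_lock (&m);
  if (err == EOWNERDEAD)
    {
      /* The previous owner died holding the lock (FUTEX_OWNER_DIED was
	 set).  Repair the protected state, then mark the mutex
	 consistent; skipping this makes it ENOTRECOVERABLE for
	 everyone.  */
      /* ... restore invariants of the shared data here ... */
      err = pthread_mutex_consistent (&m);
    }
  else if (err == ENOTRECOVERABLE)
    fputs ("mutex is permanently unusable\n", stderr);
  return err;
}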