-rw-r--r--  ChangeLog                                     | 12
-rw-r--r--  nptl/pthread_mutex_unlock.c                   | 12
-rw-r--r--  sysdeps/nptl/lowlevellock.h                   | 18
-rw-r--r--  sysdeps/unix/sysv/linux/lowlevellock-futex.h  |  9
-rw-r--r--  sysdeps/unix/sysv/linux/sparc/lowlevellock.h  |  6
5 files changed, 44 insertions(+), 13 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 9063848e27..4ba6ee4a13 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@
+2015-12-23  Torvald Riegel  <triegel@redhat.com>
+
+	[BZ #13690]
+	* sysdeps/nptl/lowlevellock.h (__lll_unlock): Do not access the lock
+	after releasing it.
+	(__lll_robust_unlock): Likewise.
+	* nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full): Likewise.
+	* sysdeps/unix/sysv/linux/sparc/lowlevellock.h (lll_unlock): Likewise.
+	(lll_robust_unlock): Likewise.
+	* sysdeps/unix/sysv/linux/lowlevellock-futex.h (__lll_private_flag):
+	Prevent warnings in callers.
+
 2015-12-23  Florian Weimer  <fweimer@redhat.com>
 
 	* malloc/arena.c (list_lock): Update comment.
diff --git a/nptl/pthread_mutex_unlock.c b/nptl/pthread_mutex_unlock.c
index c078f7ebe3..e2cd52411f 100644
--- a/nptl/pthread_mutex_unlock.c
+++ b/nptl/pthread_mutex_unlock.c
@@ -230,16 +230,18 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
/* One less user. */
--mutex->__data.__nusers;
- /* Unlock. */
+ /* Unlock. Load all necessary mutex data before releasing the mutex
+ to not violate the mutex destruction requirements (see
+ lll_unlock). */
+ int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+ int private = (robust
+ ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
+ : PTHREAD_MUTEX_PSHARED (mutex));
if ((mutex->__data.__lock & FUTEX_WAITERS) != 0
|| atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock, 0,
THREAD_GETMEM (THREAD_SELF,
tid)))
{
- int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
- int private = (robust
- ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
- : PTHREAD_MUTEX_PSHARED (mutex));
INTERNAL_SYSCALL_DECL (__err);
INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
__lll_private_flag (FUTEX_UNLOCK_PI, private));
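
The comment added above states the rule this commit enforces throughout: every mutex field the unlock path needs must be loaded before the store that releases the lock, because the release is the point after which another thread may destroy the mutex and reuse its memory. A minimal, self-contained sketch (not part of this commit; struct shared, put, and refs are made-up names) of the POSIX-permitted usage that creates the hazard:

#include <pthread.h>
#include <stdlib.h>

struct shared { pthread_mutex_t m; int refs; };

static void
put (struct shared *s)
{
  pthread_mutex_lock (&s->m);
  int last = (--s->refs == 0);
  pthread_mutex_unlock (&s->m);  /* Must not touch s->m after the release.  */
  if (last)
    {
      /* No other thread will lock the mutex again, but another thread
         may still be finishing its own unlock call.  */
      pthread_mutex_destroy (&s->m);
      free (s);                  /* Reuses the memory holding the mutex.  */
    }
}

A thread whose decrement did not reach zero can still be executing the tail of pthread_mutex_unlock when the last thread destroys and frees the mutex; that is why the unlock implementation itself must not read the mutex after its releasing store.
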
diff --git a/sysdeps/nptl/lowlevellock.h b/sysdeps/nptl/lowlevellock.h
index 27f41424ea..7d41ef0335 100644
--- a/sysdeps/nptl/lowlevellock.h
+++ b/sysdeps/nptl/lowlevellock.h
@@ -191,14 +191,21 @@ extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
that's cast to void. */
/* Unconditionally set FUTEX to 0 (not acquired), releasing the lock. If FUTEX
was >1 (acquired, possibly with waiters), then wake any waiters. The waiter
- that acquires the lock will set FUTEX to >1. */
+ that acquires the lock will set FUTEX to >1.
+ Evaluate PRIVATE before releasing the lock so that we do not violate the
+ mutex destruction requirements. Specifically, we need to ensure that
+ another thread can destroy the mutex (and reuse its memory) once it
+ acquires the lock and when there will be no further lock acquisitions;
+ thus, we must not access the lock after releasing it, or those accesses
+ could be concurrent with mutex destruction or reuse of the memory. */
#define __lll_unlock(futex, private) \
((void) \
({ \
int *__futex = (futex); \
+ int __private = (private); \
int __oldval = atomic_exchange_rel (__futex, 0); \
if (__glibc_unlikely (__oldval > 1)) \
- lll_futex_wake (__futex, 1, private); \
+ lll_futex_wake (__futex, 1, __private); \
}))
#define lll_unlock(futex, private) \
__lll_unlock (&(futex), private)
@@ -209,14 +216,17 @@ extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
that's cast to void. */
/* Unconditionally set FUTEX to 0 (not acquired), releasing the lock. If FUTEX
had FUTEX_WAITERS set then wake any waiters. The waiter that acquires the
- lock will set FUTEX_WAITERS. */
+ lock will set FUTEX_WAITERS.
+ Evaluate PRIVATE before releasing the lock so that we do not violate the
+ mutex destruction requirements (see __lll_unlock). */
#define __lll_robust_unlock(futex, private) \
((void) \
({ \
int *__futex = (futex); \
+ int __private = (private); \
int __oldval = atomic_exchange_rel (__futex, 0); \
if (__glibc_unlikely (__oldval & FUTEX_WAITERS)) \
- lll_futex_wake (__futex, 1, private); \
+ lll_futex_wake (__futex, 1, __private); \
}))
#define lll_robust_unlock(futex, private) \
__lll_robust_unlock (&(futex), private)
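
Illustrative sketch (not part of this commit; the toy_* names are made up): the macro change above applies the same rule at the low-level-lock layer. PRIVATE is a caller-supplied expression that may itself read the mutex (e.g. PTHREAD_MUTEX_PSHARED (mutex)), and macro arguments are expanded at their point of use, so passing it straight to lll_futex_wake would evaluate it after the exchange; capturing it in __private first gives the ordering shown here in C11 terms:

#include <stdatomic.h>

/* Hypothetical, simplified stand-ins for glibc's types and helpers.  */
struct toy_mutex { _Atomic int lock; int kind; };

static void
toy_futex_wake (_Atomic int *futex, int nwake, int private)
{
  /* Waking a possibly already freed futex word is harmless; only the
     address is used and spurious wakeups are tolerated.  */
  (void) futex; (void) nwake; (void) private;
}

static void
toy_unlock (struct toy_mutex *m)
{
  /* Read everything we need from *m before the releasing exchange ...  */
  int private = m->kind & 0x80;  /* stands in for PTHREAD_MUTEX_PSHARED  */
  /* ... because afterwards another thread may destroy *m and free it.  */
  int oldval = atomic_exchange_explicit (&m->lock, 0, memory_order_release);
  if (oldval > 1)
    toy_futex_wake (&m->lock, 1, private);
  /* Reading m->kind at this point instead would be the pre-fix bug.  */
}
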
diff --git a/sysdeps/unix/sysv/linux/lowlevellock-futex.h b/sysdeps/unix/sysv/linux/lowlevellock-futex.h
index 59f6627bdb..40825f0306 100644
--- a/sysdeps/unix/sysv/linux/lowlevellock-futex.h
+++ b/sysdeps/unix/sysv/linux/lowlevellock-futex.h
@@ -54,8 +54,13 @@
#if IS_IN (libc) || IS_IN (rtld)
/* In libc.so or ld.so all futexes are private. */
# ifdef __ASSUME_PRIVATE_FUTEX
-# define __lll_private_flag(fl, private) \
- ((fl) | FUTEX_PRIVATE_FLAG)
+# define __lll_private_flag(fl, private) \
+ ({ \
+ /* Prevent warnings in callers of this macro. */ \
+ int __lll_private_flag_priv __attribute__ ((unused)); \
+ __lll_private_flag_priv = (private); \
+ ((fl) | FUTEX_PRIVATE_FLAG); \
+ })
# else
# define __lll_private_flag(fl, private) \
((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
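
Illustrative sketch (not part of this commit; CONSUME_AND_OR and use_flag are made-up names): inside libc.so and ld.so with __ASSUME_PRIVATE_FUTEX, the old expansion ((fl) | FUTEX_PRIVATE_FLAG) dropped the PRIVATE argument entirely, so the new __private locals introduced by the unlock macros above would presumably end up set but never referenced in those builds and could be flagged by GCC's unused-variable warnings. Assigning PRIVATE to a dummy marked unused keeps the argument referenced, and evaluated exactly once, without changing the macro's value. The same technique in isolation, using a GNU statement expression as the patch does:

#define CONSUME_AND_OR(fl, private) \
  ({ \
    int __consume_dummy __attribute__ ((unused)); \
    /* Evaluate PRIVATE exactly once, then discard it.  */ \
    __consume_dummy = (private); \
    ((fl) | 0x80);  /* 0x80 stands in for FUTEX_PRIVATE_FLAG.  */ \
  })

static int
use_flag (int op, int pshared)
{
  int __private = pshared;                /* set here ...              */
  return CONSUME_AND_OR (op, __private);  /* ... and also read here.  */
}
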
diff --git a/sysdeps/unix/sysv/linux/sparc/lowlevellock.h b/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
index 7608c01d17..9fa73371fc 100644
--- a/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
@@ -126,17 +126,19 @@ __lll_robust_timedlock (int *futex, const struct timespec *abstime,
#define lll_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
+ int __private = (private); \
int __val = atomic_exchange_24_rel (__futex, 0); \
if (__glibc_unlikely (__val > 1)) \
- lll_futex_wake (__futex, 1, private); \
+ lll_futex_wake (__futex, 1, __private); \
}))
#define lll_robust_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
+ int __private = (private); \
int __val = atomic_exchange_rel (__futex, 0); \
if (__glibc_unlikely (__val & FUTEX_WAITERS)) \
- lll_futex_wake (__futex, 1, private); \
+ lll_futex_wake (__futex, 1, __private); \
}))
#define lll_islocked(futex) \