path: root/nptl/pthread_rwlock_rdlock.c
author     Torvald Riegel <triegel@redhat.com>  2015-04-28 23:24:36 +0200
committer  Torvald Riegel <triegel@redhat.com>  2015-06-04 15:34:30 +0200
commit     b634486d57a14b53f1cfcf739e41ddf826e51977 (patch)
tree       65ad6788da14adc8effdd9d1b4236ac24af710d7 /nptl/pthread_rwlock_rdlock.c
parent     3c9c61febede148b79d8509e16588152d99b3774 (diff)
Fix missing wake-ups in pthread_rwlock_rdlock.
This adds wake-ups that would otherwise be missing if one assumes that, for a non-writer-preferring rwlock, once one thread has acquired a rdlock and does not release it, another thread will eventually acquire a rdlock as well, despite concurrent write-lock acquisition attempts. BZ 14958 is about supporting this assumption. Strictly speaking, this is not a valid test case, but it is nonetheless worth supporting (see comment 7 of BZ 14958).
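For illustration, here is a minimal sketch of the kind of program that relies on this assumption. It is not the actual BZ 14958 test case; the thread bodies and the sleep-based ordering are assumptions made for the example. The first reader acquires the rdlock and never releases it, a writer queues behind it, and a second reader is still expected to acquire the rdlock. The default rwlock attributes are used, since the assumption only holds for a non-writer-preferring rwlock.

/* Hypothetical illustration of the assumption described above;
   not the BZ 14958 test case.  Build with: gcc -pthread sketch.c  */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

/* First reader: acquires the rdlock and never releases it.  */
static void *
reader1 (void *arg)
{
  pthread_rwlock_rdlock (&lock);
  for (;;)
    pause ();
  return NULL;
}

/* Writer: queues behind the first reader and blocks indefinitely.  */
static void *
writer (void *arg)
{
  pthread_rwlock_wrlock (&lock);
  return NULL;
}

/* Second reader: per the assumption above, it should still acquire
   the rdlock eventually, despite the queued writer.  */
static void *
reader2 (void *arg)
{
  pthread_rwlock_rdlock (&lock);
  puts ("second reader acquired the lock");
  pthread_rwlock_unlock (&lock);
  return NULL;
}

int
main (void)
{
  pthread_t r1, w, r2;
  pthread_create (&r1, NULL, reader1, NULL);
  sleep (1);   /* Crude ordering: let reader1 take the lock.  */
  pthread_create (&w, NULL, writer, NULL);
  sleep (1);   /* Let the writer queue behind reader1.  */
  pthread_create (&r2, NULL, reader2, NULL);
  /* Should return: the second reader must not block forever.  */
  pthread_join (r2, NULL);
  return 0;
}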
Diffstat (limited to 'nptl/pthread_rwlock_rdlock.c')
-rw-r--r--  nptl/pthread_rwlock_rdlock.c  40
1 file changed, 38 insertions(+), 2 deletions(-)
diff --git a/nptl/pthread_rwlock_rdlock.c b/nptl/pthread_rwlock_rdlock.c
index 0edca654a3..004a386fd5 100644
--- a/nptl/pthread_rwlock_rdlock.c
+++ b/nptl/pthread_rwlock_rdlock.c
@@ -23,6 +23,7 @@
#include <pthreadP.h>
#include <stap-probe.h>
#include <elide.h>
+#include <stdbool.h>
/* Acquire read lock for RWLOCK. Slow path. */
@@ -30,6 +31,7 @@ static int __attribute__((noinline))
__pthread_rwlock_rdlock_slow (pthread_rwlock_t *rwlock)
{
int result = 0;
+ bool wake = false;
/* Lock is taken in caller. */
@@ -81,7 +83,17 @@ __pthread_rwlock_rdlock_slow (pthread_rwlock_t *rwlock)
result = EAGAIN;
}
else
- LIBC_PROBE (rdlock_acquire_read, 1, rwlock);
+ {
+ LIBC_PROBE (rdlock_acquire_read, 1, rwlock);
+ /* See pthread_rwlock_rdlock. */
+ if (rwlock->__data.__nr_readers == 1
+ && rwlock->__data.__nr_readers_queued > 0
+ && rwlock->__data.__nr_writers_queued > 0)
+ {
+ ++rwlock->__data.__readers_wakeup;
+ wake = true;
+ }
+ }
break;
}
@@ -90,6 +102,10 @@ __pthread_rwlock_rdlock_slow (pthread_rwlock_t *rwlock)
/* We are done, free the lock. */
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
+ if (wake)
+ lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX,
+ rwlock->__data.__shared);
+
return result;
}
@@ -100,6 +116,7 @@ int
__pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
int result = 0;
+ bool wake = false;
LIBC_PROBE (rdlock_entry, 1, rwlock);
@@ -126,11 +143,30 @@ __pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
result = EAGAIN;
}
else
- LIBC_PROBE (rdlock_acquire_read, 1, rwlock);
+ {
+ LIBC_PROBE (rdlock_acquire_read, 1, rwlock);
+ /* If we are the first reader, and there are blocked readers and
+ writers (which we don't prefer, see above), then it can be the
+ case that we stole the lock from a writer that was already woken
+ to acquire it. That means that we need to take over the writer's
+ responsibility to wake all readers (see pthread_rwlock_unlock).
+ Thus, wake all readers in this case. */
+ if (rwlock->__data.__nr_readers == 1
+ && rwlock->__data.__nr_readers_queued > 0
+ && rwlock->__data.__nr_writers_queued > 0)
+ {
+ ++rwlock->__data.__readers_wakeup;
+ wake = true;
+ }
+ }
/* We are done, free the lock. */
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
+ if (wake)
+ lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX,
+ rwlock->__data.__shared);
+
return result;
}
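For readers unfamiliar with glibc's internal lll_futex_* helpers: the code added above follows the usual futex wake protocol. The futex word (__readers_wakeup, which blocked readers wait on) is bumped while the internal lock is still held, so a reader that is about to block on the old value will have its FUTEX_WAIT fail and recheck the lock state; the wake itself is issued only after lll_unlock, so woken readers do not immediately collide with the internal lock. A rough user-space equivalent of the wake side (assuming Linux; the names below are hypothetical stand-ins, not glibc internals):

#include <limits.h>
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical stand-in for __readers_wakeup: the futex word that
   blocked readers wait on.  */
static atomic_int readers_wakeup;

/* Sketch of the wake side of the protocol used by the patch.  */
static void
wake_all_readers (void)
{
  /* Change the futex word first, so a reader about to block on the
     old value fails its FUTEX_WAIT and rechecks the lock state.  */
  atomic_fetch_add (&readers_wakeup, 1);

  /* Then wake every reader already blocked on the word.  */
  syscall (SYS_futex, &readers_wakeup, FUTEX_WAKE, INT_MAX,
           NULL, NULL, 0);
}

In the patch itself the increment is a plain ++rwlock->__data.__readers_wakeup rather than an atomic add, because it is performed while the rwlock's internal lock (__lock) is still held.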