author     Ulrich Drepper <drepper@redhat.com>    2000-01-05 02:09:12 +0000
committer  Ulrich Drepper <drepper@redhat.com>    2000-01-05 02:09:12 +0000
commit     1d2fc9b3c59d0e83e04139ddf633731264b76ea2 (patch)
tree       c738cf2a40851dc25be2c252ba5dbb7f335b5e14 /linuxthreads
parent     f19f2b34439145daf300bf12789bbc61c8d4db28 (diff)
Redesigned how cancellation unblocks a thread from internal cancellation
points (sem_wait, pthread_join, pthread_cond_{wait,timedwait}).
Cancellation won't eat a signal in any of these functions (*required* by
POSIX and Single Unix Spec!).

2000-01-03  Kaz Kylheku  <kaz@ashi.footprints.net>

        * condvar.c: spontaneous wakeup on pthread_cond_timedwait won't eat a
        simultaneous condition variable signal (not required by POSIX or
        Single Unix Spec, but nice).
        * spinlock.c: __pthread_lock queues back any received restarts that
        don't belong to it instead of assuming ownership of the lock upon
        any restart; a fastlock can no longer be acquired by two threads
        simultaneously.
        * restart.h: restarts queue even on kernels that don't have queued
        real-time signals (2.0, early 2.1), thanks to an atomic counter,
        avoiding a rare race condition in pthread_cond_timedwait.
Diffstat (limited to 'linuxthreads')
-rw-r--r--  linuxthreads/ChangeLog      |  18
-rw-r--r--  linuxthreads/cancel.c       |  44
-rw-r--r--  linuxthreads/condvar.c      | 324
-rw-r--r--  linuxthreads/internals.h    |  40
-rw-r--r--  linuxthreads/join.c         |  48
-rw-r--r--  linuxthreads/oldsemaphore.c |  24
-rw-r--r--  linuxthreads/pthread.c      |  66
-rw-r--r--  linuxthreads/queue.h        |   5
-rw-r--r--  linuxthreads/restart.h      |  35
-rw-r--r--  linuxthreads/rwlock.c       |   2
-rw-r--r--  linuxthreads/semaphore.c    |  54
-rw-r--r--  linuxthreads/spinlock.c     |  24
-rw-r--r--  linuxthreads/spinlock.h     |  28
13 files changed, 610 insertions(+), 102 deletions(-)
diff --git a/linuxthreads/ChangeLog b/linuxthreads/ChangeLog
index 37be0e829f..78e8552a6f 100644
--- a/linuxthreads/ChangeLog
+++ b/linuxthreads/ChangeLog
@@ -1,3 +1,21 @@
+2000-01-03 Kaz Kylheku <kaz@ashi.footprints.net>
+
+ Redesigned how cancellation unblocks a thread from internal
+ cancellation points (sem_wait, pthread_join,
+ pthread_cond_{wait,timedwait}).
+ Cancellation won't eat a signal in any of these functions
+ (*required* by POSIX and Single Unix Spec!).
+ * condvar.c: spontaneous wakeup on pthread_cond_timedwait won't eat a
+ simultaneous condition variable signal (not required by POSIX
+ or Single Unix Spec, but nice).
+ * spinlock.c: __pthread_lock queues back any received restarts
+ that don't belong to it instead of assuming ownership of lock
+ upon any restart; fastlock can no longer be acquired by two threads
+ simultaneously.
+ * restart.h: restarts queue even on kernels that don't have
+ queued real time signals (2.0, early 2.1), thanks to atomic counter,
+ avoiding a rare race condition in pthread_cond_timedwait.
+
1999-12-31 Andreas Jaeger <aj@suse.de>
* internals.h: Remove duplicate prototype declarations.
diff --git a/linuxthreads/cancel.c b/linuxthreads/cancel.c
index c45cac97a3..8fd8c1e60f 100644
--- a/linuxthreads/cancel.c
+++ b/linuxthreads/cancel.c
@@ -52,16 +52,54 @@ int pthread_cancel(pthread_t thread)
{
pthread_handle handle = thread_handle(thread);
int pid;
+ int dorestart = 0;
+ pthread_descr th;
+ pthread_extricate_if *pextricate;
__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread)) {
__pthread_unlock(&handle->h_lock);
return ESRCH;
}
- handle->h_descr->p_canceled = 1;
- pid = handle->h_descr->p_pid;
+
+ th = handle->h_descr;
+
+ if (th->p_canceled) {
+ __pthread_unlock(&handle->h_lock);
+ return 0;
+ }
+
+ pextricate = th->p_extricate;
+ th->p_canceled = 1;
+ pid = th->p_pid;
+
+ /* If the thread has registered an extrication interface, then
+ invoke the interface. If it returns 1, then we succeeded in
+ dequeuing the thread from whatever waiting object it was enqueued
+ with. In that case, it is our responsibility to wake it up.
+ And also to set the p_woken_by_cancel flag so the woken thread
+ can tell that it was woken by cancellation. */
+
+ if (pextricate != NULL) {
+ dorestart = pextricate->pu_extricate_func(pextricate->pu_object, th);
+ th->p_woken_by_cancel = dorestart;
+ }
+
__pthread_unlock(&handle->h_lock);
- kill(pid, __pthread_sig_cancel);
+
+ /* If the thread has suspended or is about to, then we unblock it by
+ issuing a restart, instead of a cancel signal. Otherwise we send
+ the cancel signal to unblock the thread from a cancellation point,
+ or to initiate asynchronous cancellation. The restart is needed so
+ we have proper accounting of restarts; suspend decrements the thread's
+ resume count, and restart() increments it. This also means that suspend's
+ handling of the cancel signal is obsolete. */
+
+ if (dorestart)
+ restart(th);
+ else
+ kill(pid, __pthread_sig_cancel);
+
return 0;
}
diff --git a/linuxthreads/condvar.c b/linuxthreads/condvar.c
index 2ea7513c68..87a93a9115 100644
--- a/linuxthreads/condvar.c
+++ b/linuxthreads/condvar.c
@@ -25,6 +25,22 @@
#include "queue.h"
#include "restart.h"
+static int pthread_cond_timedwait_relative_old(pthread_cond_t *,
+ pthread_mutex_t *, const struct timespec *);
+
+static int pthread_cond_timedwait_relative_new(pthread_cond_t *,
+ pthread_mutex_t *, const struct timespec *);
+
+static int (*pthread_cond_tw_rel)(pthread_cond_t *, pthread_mutex_t *,
+ const struct timespec *) = pthread_cond_timedwait_relative_old;
+
+/* initialize this module */
+void __pthread_init_condvar(int rt_sig_available)
+{
+ if (rt_sig_available)
+ pthread_cond_tw_rel = pthread_cond_timedwait_relative_new;
+}
+
int pthread_cond_init(pthread_cond_t *cond,
const pthread_condattr_t *cond_attr)
{
@@ -39,54 +55,125 @@ int pthread_cond_destroy(pthread_cond_t *cond)
return 0;
}
+/* Function called by pthread_cancel to remove the thread from
+ waiting on a condition variable queue. */
+
+static int cond_extricate_func(void *obj, pthread_descr th)
+{
+ volatile pthread_descr self = thread_self();
+ pthread_cond_t *cond = obj;
+ int did_remove = 0;
+
+ __pthread_lock(&cond->__c_lock, self);
+ did_remove = remove_from_queue(&cond->__c_waiting, th);
+ __pthread_unlock(&cond->__c_lock);
+
+ return did_remove;
+}
+
int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
volatile pthread_descr self = thread_self();
+ pthread_extricate_if extr;
+ int already_canceled = 0;
+
+ /* Set up extrication interface */
+ extr.pu_object = cond;
+ extr.pu_extricate_func = cond_extricate_func;
+
+ /* Register extrication interface */
+ __pthread_set_own_extricate_if(self, &extr);
+
+ /* Atomically enqueue thread for waiting, but only if it is not
+ canceled. If the thread is canceled, then it will fall through the
+ suspend call below, and then call pthread_exit without
+ having to worry about whether it is still on the condition variable queue.
+ This depends on pthread_cancel setting p_canceled before calling the
+ extricate function. */
__pthread_lock(&cond->__c_lock, self);
- enqueue(&cond->__c_waiting, self);
+ if (!(THREAD_GETMEM(self, p_canceled)
+ && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
+ enqueue(&cond->__c_waiting, self);
+ else
+ already_canceled = 1;
__pthread_unlock(&cond->__c_lock);
+
+ if (already_canceled) {
+ __pthread_set_own_extricate_if(self, 0);
+ pthread_exit(PTHREAD_CANCELED);
+ }
+
pthread_mutex_unlock(mutex);
- suspend_with_cancellation(self);
- pthread_mutex_lock(mutex);
- /* This is a cancellation point */
- if (THREAD_GETMEM(self, p_canceled)
+
+ suspend(self);
+ __pthread_set_own_extricate_if(self, 0);
+
+ /* Check for cancellation again, to provide correct cancellation
+ point behavior */
+
+ if (THREAD_GETMEM(self, p_woken_by_cancel)
&& THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
- /* Remove ourselves from the waiting queue if we're still on it */
- __pthread_lock(&cond->__c_lock, self);
- remove_from_queue(&cond->__c_waiting, self);
- __pthread_unlock(&cond->__c_lock);
+ THREAD_SETMEM(self, p_woken_by_cancel, 0);
+ pthread_mutex_lock(mutex);
pthread_exit(PTHREAD_CANCELED);
}
+
+ pthread_mutex_lock(mutex);
return 0;
}
+/* The following function is used on kernels that don't have rt signals.
+ SIGUSR1 is used as the restart signal. The different code is needed
+ because that ordinary signal does not queue. */
+
static int
-pthread_cond_timedwait_relative(pthread_cond_t *cond,
+pthread_cond_timedwait_relative_old(pthread_cond_t *cond,
pthread_mutex_t *mutex,
const struct timespec * reltime)
{
volatile pthread_descr self = thread_self();
sigset_t unblock, initial_mask;
- int retsleep;
+ int retsleep, already_canceled, was_signalled;
sigjmp_buf jmpbuf;
+ pthread_extricate_if extr;
+
+requeue_and_wait_again:
- /* Wait on the condition */
+ retsleep = 0;
+ already_canceled = 0;
+ was_signalled = 0;
+
+ /* Set up extrication interface */
+ extr.pu_object = cond;
+ extr.pu_extricate_func = cond_extricate_func;
+
+ /* Register extrication interface */
+ __pthread_set_own_extricate_if(self, &extr);
+
+ /* Enqueue to wait on the condition and check for cancellation. */
__pthread_lock(&cond->__c_lock, self);
- enqueue(&cond->__c_waiting, self);
+ if (!(THREAD_GETMEM(self, p_canceled)
+ && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
+ enqueue(&cond->__c_waiting, self);
+ else
+ already_canceled = 1;
__pthread_unlock(&cond->__c_lock);
+
+ if (already_canceled) {
+ __pthread_set_own_extricate_if(self, 0);
+ pthread_exit(PTHREAD_CANCELED);
+ }
+
pthread_mutex_unlock(mutex);
- continue_waiting:
- /* Set up a longjmp handler for the restart and cancel signals */
- if (sigsetjmp(jmpbuf, 1) == 0) {
- THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
- THREAD_SETMEM(self, p_cancel_jmp, &jmpbuf);
- THREAD_SETMEM(self, p_signal, 0);
- /* Check for cancellation */
- if (THREAD_GETMEM(self, p_canceled)
- && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
- retsleep = -1;
- } else {
+
+ if (atomic_decrement(&self->p_resume_count) == 0) {
+ /* Set up a longjmp handler for the restart signal, unblock
+ the signal and sleep. */
+
+ if (sigsetjmp(jmpbuf, 1) == 0) {
+ THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
+ THREAD_SETMEM(self, p_signal, 0);
/* Unblock the restart signal */
sigemptyset(&unblock);
sigaddset(&unblock, __pthread_sig_restart);
@@ -95,37 +182,180 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond,
retsleep = __libc_nanosleep(reltime, NULL);
/* Block the restart signal again */
sigprocmask(SIG_SETMASK, &initial_mask, NULL);
+ was_signalled = 0;
+ } else {
+ retsleep = -1;
+ was_signalled = 1;
}
- } else {
- retsleep = -1;
+ THREAD_SETMEM(self, p_signal_jmp, NULL);
}
- THREAD_SETMEM(self, p_signal_jmp, NULL);
- THREAD_SETMEM(self, p_cancel_jmp, NULL);
- /* Here, either the condition was signaled (self->p_signal != 0)
- or we got canceled (self->p_canceled != 0)
- or the timeout occurred (retsleep == 0)
- or another interrupt occurred (retsleep == -1) */
- /* This is a cancellation point */
- if (THREAD_GETMEM(self, p_canceled)
+
+ /* Now was_signalled is true if we exited the above code
+ due to the delivery of a restart signal. In that case,
+ we know we have been dequeued and resumed and that the
+ resume count is balanced. Otherwise, there are some
+ cases to consider. First, try to bump up the resume count
+ back to zero. If it goes to 1, it means restart() was
+ invoked on this thread. The signal must be consumed
+ and the count bumped down and everything is cool.
+ Otherwise, no restart was delivered yet, so we remove
+ the thread from the queue. If this succeeds, it's a clear
+ case of timeout. If we fail to remove from the queue, then we
+ must wait for a restart. */
+
+ if (!was_signalled) {
+ if (atomic_increment(&self->p_resume_count) != -1) {
+ __pthread_wait_for_restart_signal(self);
+ atomic_decrement(&self->p_resume_count); /* should be zero now! */
+ } else {
+ int was_on_queue;
+ __pthread_lock(&cond->__c_lock, self);
+ was_on_queue = remove_from_queue(&cond->__c_waiting, self);
+ __pthread_unlock(&cond->__c_lock);
+
+ if (was_on_queue) {
+ __pthread_set_own_extricate_if(self, 0);
+ pthread_mutex_lock(mutex);
+
+ if (retsleep == 0)
+ return ETIMEDOUT;
+ /* Woken by a signal: resume waiting as
+ required by Single Unix Specification. */
+ goto requeue_and_wait_again;
+ }
+
+ suspend(self);
+ }
+ }
+
+ __pthread_set_own_extricate_if(self, 0);
+
+ /* The remaining logic is the same as in other cancellable waits,
+ such as pthread_join, sem_wait or pthread_cond_wait. */
+
+ if (THREAD_GETMEM(self, p_woken_by_cancel)
&& THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
- __pthread_lock(&cond->__c_lock, self);
- remove_from_queue(&cond->__c_waiting, self);
- __pthread_unlock(&cond->__c_lock);
+ THREAD_SETMEM(self, p_woken_by_cancel, 0);
pthread_mutex_lock(mutex);
pthread_exit(PTHREAD_CANCELED);
}
- /* If not signaled: also remove ourselves and return an error code, but
- only if the timeout has elapsed. If not, just continue waiting. */
- if (THREAD_GETMEM(self, p_signal) == 0) {
- if (retsleep != 0)
- goto continue_waiting;
+
+ pthread_mutex_lock(mutex);
+ return 0;
+}
+
+/* The following function is used on new (late 2.1 and 2.2 and higher) kernels
+ that have rt signals which queue. */
+
+static int
+pthread_cond_timedwait_relative_new(pthread_cond_t *cond,
+ pthread_mutex_t *mutex,
+ const struct timespec * reltime)
+{
+ volatile pthread_descr self = thread_self();
+ sigset_t unblock, initial_mask;
+ int retsleep, already_canceled, was_signalled;
+ sigjmp_buf jmpbuf;
+ pthread_extricate_if extr;
+
+ requeue_and_wait_again:
+
+ retsleep = 0;
+ already_canceled = 0;
+ was_signalled = 0;
+
+ /* Set up extrication interface */
+ extr.pu_object = cond;
+ extr.pu_extricate_func = cond_extricate_func;
+
+ /* Register extrication interface */
+ __pthread_set_own_extricate_if(self, &extr);
+
+ /* Enqueue to wait on the condition and check for cancellation. */
+ __pthread_lock(&cond->__c_lock, self);
+ if (!(THREAD_GETMEM(self, p_canceled)
+ && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
+ enqueue(&cond->__c_waiting, self);
+ else
+ already_canceled = 1;
+ __pthread_unlock(&cond->__c_lock);
+
+ if (already_canceled) {
+ __pthread_set_own_extricate_if(self, 0);
+ pthread_exit(PTHREAD_CANCELED);
+ }
+
+ pthread_mutex_unlock(mutex);
+
+ /* Set up a longjmp handler for the restart signal, unblock
+ the signal and sleep. */
+
+ if (sigsetjmp(jmpbuf, 1) == 0) {
+ THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
+ THREAD_SETMEM(self, p_signal, 0);
+ /* Unblock the restart signal */
+ sigemptyset(&unblock);
+ sigaddset(&unblock, __pthread_sig_restart);
+ sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
+ /* Sleep for the required duration */
+ retsleep = __libc_nanosleep(reltime, NULL);
+ /* Block the restart signal again */
+ sigprocmask(SIG_SETMASK, &initial_mask, NULL);
+ was_signalled = 0;
+ } else {
+ retsleep = -1;
+ was_signalled = 1;
+ }
+ THREAD_SETMEM(self, p_signal_jmp, NULL);
+
+ /* Now was_signalled is true if we exited the above code
+ due to the delivery of a restart signal. In that case,
+ everything is cool. We have been removed from the queue
+ by the other thread, and consumed its signal.
+
+ Otherwise this thread woke up spontaneously, or due to a signal other
+ than restart. The next thing to do is to try to remove the thread
+ from the queue. This may fail due to a race against another thread
+ trying to do the same. In the failed case, we know we were signalled,
+ and we may also have to consume a restart signal. */
+
+ if (!was_signalled) {
+ int was_on_queue;
+
+ /* __pthread_lock will queue back any spurious restarts that
+ may happen to it. */
+
__pthread_lock(&cond->__c_lock, self);
- remove_from_queue(&cond->__c_waiting, self);
+ was_on_queue = remove_from_queue(&cond->__c_waiting, self);
__pthread_unlock(&cond->__c_lock);
+
+ if (was_on_queue) {
+ __pthread_set_own_extricate_if(self, 0);
+ pthread_mutex_lock(mutex);
+
+ if (retsleep == 0)
+ return ETIMEDOUT;
+ /* Woken by a signal: resume waiting as
+ required by Single Unix Specification. */
+ goto requeue_and_wait_again;
+ }
+
+ /* Eat the outstanding restart() from the signaller */
+ suspend(self);
+ }
+
+ __pthread_set_own_extricate_if(self, 0);
+
+ /* The remaining logic is the same as in other cancellable waits,
+ such as pthread_join, sem_wait or pthread_cond_wait. */
+
+ if (THREAD_GETMEM(self, p_woken_by_cancel)
+ && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
+ THREAD_SETMEM(self, p_woken_by_cancel, 0);
pthread_mutex_lock(mutex);
- return ETIMEDOUT;
+ pthread_exit(PTHREAD_CANCELED);
}
- /* Otherwise, return normally */
+
pthread_mutex_lock(mutex);
return 0;
}
@@ -144,7 +374,9 @@ int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
reltime.tv_sec -= 1;
}
if (reltime.tv_sec < 0) return ETIMEDOUT;
- return pthread_cond_timedwait_relative(cond, mutex, &reltime);
+
+ /* Indirect call through pointer! */
+ return pthread_cond_tw_rel(cond, mutex, &reltime);
}
int pthread_cond_signal(pthread_cond_t *cond)
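The wakeup analysis at the end of the old-kernel timed wait reduces to a
small decision table. Below is a hedged sketch of just that logic as a pure
function; inc_old and was_on_queue stand in for the values returned by
atomic_increment() and remove_from_queue(), and none of this is the actual
glibc code.

#include <stdio.h>

enum wake {
    WAKE_RESTARTED,        /* restart signal arrived during nanosleep */
    WAKE_TIMEOUT,          /* we dequeued ourselves: genuine timeout */
    WAKE_CONSUME_RESTART   /* a restart is owed to us: wait for / eat it */
};

static enum wake triage(int was_signalled, long inc_old, int was_on_queue)
{
    if (was_signalled)
        return WAKE_RESTARTED;
    if (inc_old != -1)            /* count went to 1: restart() already ran */
        return WAKE_CONSUME_RESTART;
    if (was_on_queue)             /* nobody woke us: the timeout is real */
        return WAKE_TIMEOUT;
    return WAKE_CONSUME_RESTART;  /* a signaller dequeued us concurrently */
}

int main(void)
{
    printf("%d %d %d\n",
           triage(1, 0, 0),   /* WAKE_RESTARTED */
           triage(0, -1, 1),  /* WAKE_TIMEOUT */
           triage(0, 0, 0));  /* WAKE_CONSUME_RESTART */
    return 0;
}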
diff --git a/linuxthreads/internals.h b/linuxthreads/internals.h
index c5fde1f883..8af2fca335 100644
--- a/linuxthreads/internals.h
+++ b/linuxthreads/internals.h
@@ -86,6 +86,27 @@ struct pthread_key_struct {
typedef struct _pthread_descr_struct * pthread_descr;
+/* Callback interface for removing the thread from waiting on an
+ object if it is cancelled while waiting or about to wait.
+ This holds a pointer to the object, and a pointer to a function
+ which ``extricates'' the thread from its enqueued state.
+ The function takes two arguments: pointer to the wait object,
+ and a pointer to the thread. It returns 1 if an extrication
+ actually occurred, and hence the thread must also be signalled.
+ It returns 0 if the thread had already been extricated. */
+
+typedef struct _pthread_extricate_struct {
+ void *pu_object;
+ int (*pu_extricate_func)(void *, pthread_descr);
+} pthread_extricate_if;
+
+/* Atomic counter made possible by compare_and_swap */
+
+struct pthread_atomic {
+ long p_count;
+ int p_spinlock;
+};
+
struct _pthread_descr_struct {
pthread_descr p_nextlive, p_prevlive;
/* Double chaining of active threads */
@@ -96,6 +117,7 @@ struct _pthread_descr_struct {
int p_priority; /* Thread priority (== 0 if not realtime) */
struct _pthread_fastlock * p_lock; /* Spinlock for synchronized accesses */
int p_signal; /* last signal received */
+ struct pthread_atomic p_resume_count; /* number of times restart() was called on thread */
sigjmp_buf * p_signal_jmp; /* where to siglongjmp on a signal or NULL */
sigjmp_buf * p_cancel_jmp; /* where to siglongjmp on a cancel or NULL */
char p_terminated; /* true if terminated e.g. by pthread_exit */
@@ -108,6 +130,8 @@ struct _pthread_descr_struct {
char p_cancelstate; /* cancellation state */
char p_canceltype; /* cancellation type (deferred/async) */
char p_canceled; /* cancellation request pending */
+ char p_woken_by_cancel; /* cancellation performed wakeup */
+ pthread_extricate_if *p_extricate; /* See above */
int * p_errnop; /* pointer to used errno variable */
int p_errno; /* error returned by last system call */
int * p_h_errnop; /* pointer to used h_errno variable */
@@ -353,6 +377,7 @@ void __pthread_manager_sighandler(int sig);
void __pthread_reset_main_thread(void);
void __fresetlockfiles(void);
void __pthread_manager_adjust_prio(int thread_prio);
+void __pthread_set_own_extricate_if(pthread_descr self, pthread_extricate_if *peif);
extern int __pthread_attr_setguardsize (pthread_attr_t *__attr,
size_t __guardsize);
@@ -372,6 +397,21 @@ extern int __pthread_mutexattr_gettype (const pthread_mutexattr_t *__attr,
int *__kind);
extern void __pthread_kill_other_threads_np (void);
+void __pthread_restart_old(pthread_descr th);
+void __pthread_suspend_old(pthread_descr self);
+
+void __pthread_restart_new(pthread_descr th);
+void __pthread_suspend_new(pthread_descr self);
+
+void __pthread_wait_for_restart_signal(pthread_descr self);
+
+void __pthread_init_condvar(int rt_sig_available);
+
+/* Global pointers to old or new suspend functions */
+
+extern void (*__pthread_restart)(pthread_descr);
+extern void (*__pthread_suspend)(pthread_descr);
+
/* Prototypes for the function without cancelation support when the
normal version has it. */
extern int __libc_close (int fd);
diff --git a/linuxthreads/join.c b/linuxthreads/join.c
index 71db541391..5e6b78ab3b 100644
--- a/linuxthreads/join.c
+++ b/linuxthreads/join.c
@@ -79,12 +79,37 @@ void pthread_exit(void * retval)
_exit(0);
}
+/* Function called by pthread_cancel to remove the thread from
+ waiting inside pthread_join. */
+
+static int join_extricate_func(void *obj, pthread_descr th)
+{
+ volatile pthread_descr self = thread_self();
+ pthread_handle handle = obj;
+ pthread_descr jo;
+ int did_remove = 0;
+
+ __pthread_lock(&handle->h_lock, self);
+ jo = handle->h_descr;
+ did_remove = jo->p_joining != NULL;
+ jo->p_joining = NULL;
+ __pthread_unlock(&handle->h_lock);
+
+ return did_remove;
+}
+
int pthread_join(pthread_t thread_id, void ** thread_return)
{
volatile pthread_descr self = thread_self();
struct pthread_request request;
pthread_handle handle = thread_handle(thread_id);
pthread_descr th;
+ pthread_extricate_if extr;
+ int already_canceled = 0;
+
+ /* Set up extrication interface */
+ extr.pu_object = handle;
+ extr.pu_extricate_func = join_extricate_func;
__pthread_lock(&handle->h_lock, self);
if (invalid_handle(handle, thread_id)) {
@@ -103,13 +128,28 @@ int pthread_join(pthread_t thread_id, void ** thread_return)
}
/* If not terminated yet, suspend ourselves. */
if (! th->p_terminated) {
- th->p_joining = self;
+ /* Register extrication interface */
+ __pthread_set_own_extricate_if(self, &extr);
+ if (!(THREAD_GETMEM(self, p_canceled)
+ && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
+ th->p_joining = self;
+ else
+ already_canceled = 1;
__pthread_unlock(&handle->h_lock);
- suspend_with_cancellation(self);
+
+ if (already_canceled) {
+ __pthread_set_own_extricate_if(self, 0);
+ pthread_exit(PTHREAD_CANCELED);
+ }
+
+ suspend(self);
+ /* Deregister extrication interface */
+ __pthread_set_own_extricate_if(self, 0);
+
/* This is a cancellation point */
- if (THREAD_GETMEM(self, p_canceled)
+ if (THREAD_GETMEM(self, p_woken_by_cancel)
&& THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
- th->p_joining = NULL;
+ THREAD_SETMEM(self, p_woken_by_cancel, 0);
pthread_exit(PTHREAD_CANCELED);
}
__pthread_lock(&handle->h_lock, self);
diff --git a/linuxthreads/oldsemaphore.c b/linuxthreads/oldsemaphore.c
index 72d12d20c3..62d2812693 100644
--- a/linuxthreads/oldsemaphore.c
+++ b/linuxthreads/oldsemaphore.c
@@ -68,13 +68,29 @@ int __old_sem_init(old_sem_t *sem, int pshared, unsigned int value)
return 0;
}
+/* Function called by pthread_cancel to remove the thread from
+ waiting inside __old_sem_wait. Here we simply unconditionally
+ indicate that the thread is to be woken, by returning 1. */
+
+static int old_sem_extricate_func(void *obj, pthread_descr th)
+{
+ return 1;
+}
+
int __old_sem_wait(old_sem_t * sem)
{
long oldstatus, newstatus;
volatile pthread_descr self = thread_self();
pthread_descr * th;
+ pthread_extricate_if extr;
+
+ /* Set up extrication interface */
+ extr.pu_object = 0;
+ extr.pu_extricate_func = old_sem_extricate_func;
while (1) {
+ /* Register extrication interface */
+ __pthread_set_own_extricate_if(self, &extr);
do {
oldstatus = sem->sem_status;
if ((oldstatus & 1) && (oldstatus != 1))
@@ -85,11 +101,15 @@ int __old_sem_wait(old_sem_t * sem)
}
}
while (! sem_compare_and_swap(sem, oldstatus, newstatus));
- if (newstatus & 1)
+ if (newstatus & 1) {
/* We got the semaphore. */
+ __pthread_set_own_extricate_if(self, 0);
return 0;
+ }
/* Wait for sem_post or cancellation */
- suspend_with_cancellation(self);
+ suspend(self);
+ __pthread_set_own_extricate_if(self, 0);
+
/* This is a cancellation point */
if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE) {
/* Remove ourselves from the waiting list if we're still on it */
diff --git a/linuxthreads/pthread.c b/linuxthreads/pthread.c
index dfde08f432..f082e95478 100644
--- a/linuxthreads/pthread.c
+++ b/linuxthreads/pthread.c
@@ -43,6 +43,7 @@ struct _pthread_descr_struct __pthread_initial_thread = {
0, /* int p_priority */
&__pthread_handles[0].h_lock, /* struct _pthread_fastlock * p_lock */
0, /* int p_signal */
+ ATOMIC_INITIALIZER, /* struct pthread_atomic p_resume_count */
NULL, /* sigjmp_buf * p_signal_buf */
NULL, /* sigjmp_buf * p_cancel_buf */
0, /* char p_terminated */
@@ -55,6 +56,8 @@ struct _pthread_descr_struct __pthread_initial_thread = {
0, /* char p_cancelstate */
0, /* char p_canceltype */
0, /* char p_canceled */
+ 0, /* char p_woken_by_cancel */
+ NULL, /* struct pthread_extricate_if *p_extricate */
NULL, /* int *p_errnop */
0, /* int p_errno */
NULL, /* int *p_h_errnop */
@@ -86,6 +89,7 @@ struct _pthread_descr_struct __pthread_manager_thread = {
0, /* int p_priority */
&__pthread_handles[1].h_lock, /* struct _pthread_fastlock * p_lock */
0, /* int p_signal */
+ ATOMIC_INITIALIZER, /* struct pthread_atomic p_resume_count */
NULL, /* sigjmp_buf * p_signal_buf */
NULL, /* sigjmp_buf * p_cancel_buf */
0, /* char p_terminated */
@@ -98,6 +102,8 @@ struct _pthread_descr_struct __pthread_manager_thread = {
0, /* char p_cancelstate */
0, /* char p_canceltype */
0, /* char p_canceled */
+ 0, /* char p_woken_by_cancel */
+ NULL, /* struct pthread_extricate_if *p_extricate */
&__pthread_manager_thread.p_errno, /* int *p_errnop */
0, /* int p_errno */
NULL, /* int *p_h_errnop */
@@ -144,6 +150,12 @@ char *__pthread_manager_thread_tos = NULL;
int __pthread_exit_requested = 0;
int __pthread_exit_code = 0;
+/* Pointers that select new or old suspend/resume functions
+ based on availability of rt signals. */
+
+void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
+void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
+
/* Communicate relevant LinuxThreads constants to gdb */
const int __pthread_threads_max = PTHREAD_THREADS_MAX;
@@ -215,13 +227,18 @@ init_rtsigs (void)
__pthread_sig_cancel = SIGUSR2;
__pthread_sig_debug = 0;
#endif
+ __pthread_init_condvar(0);
}
else
{
#if __SIGRTMAX - __SIGRTMIN >= 3
current_rtmin = __SIGRTMIN + 3;
+ __pthread_restart = __pthread_restart_new;
+ __pthread_suspend = __pthread_wait_for_restart_signal;
+ __pthread_init_condvar(1);
#else
current_rtmin = __SIGRTMIN;
+ __pthread_init_condvar(0);
#endif
current_rtmax = __SIGRTMAX;
@@ -447,7 +464,7 @@ int __pthread_initialize_manager(void)
raise(__pthread_sig_debug);
/* We suspend ourself and gdb will wake us up when it is
ready to handle us. */
- suspend(thread_self());
+ __pthread_wait_for_restart_signal(thread_self());
}
/* Synchronize debugging of the thread manager */
request.req_kind = REQ_DEBUG;
@@ -770,6 +787,53 @@ int __pthread_getconcurrency(void)
}
weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
+void __pthread_set_own_extricate_if(pthread_descr self, pthread_extricate_if *peif)
+{
+ __pthread_lock(self->p_lock, self);
+ THREAD_SETMEM(self, p_extricate, peif);
+ __pthread_unlock(self->p_lock);
+}
+
+/* Primitives for controlling thread execution */
+
+void __pthread_wait_for_restart_signal(pthread_descr self)
+{
+ sigset_t mask;
+
+ sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
+ sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
+ do {
+ self->p_signal = 0;
+ sigsuspend(&mask); /* Wait for signal */
+ } while (self->p_signal != __pthread_sig_restart);
+}
+
+/* The _old variants are for 2.0 and early 2.1 kernels which don't have RT signals.
+ On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
+ Since the restart signal does not queue, we use an atomic counter to create
+ queuing semantics. This is needed to resolve a rare race condition in
+ pthread_cond_timedwait_relative. */
+
+void __pthread_restart_old(pthread_descr th)
+{
+ if (atomic_increment(&th->p_resume_count) == -1)
+ kill(th->p_pid, __pthread_sig_restart);
+}
+
+void __pthread_suspend_old(pthread_descr self)
+{
+ if (atomic_decrement(&self->p_resume_count) <= 0)
+ __pthread_wait_for_restart_signal(self);
+}
+
+void __pthread_restart_new(pthread_descr th)
+{
+ kill(th->p_pid, __pthread_sig_restart);
+}
+
+/* There is no __pthread_suspend_new because it would just
+ be a wasteful wrapper for __pthread_wait_for_restart_signal */
+
/* Debugging aid */
#ifdef DEBUG
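The net effect of the counter is that a restart issued before the matching
suspend is banked instead of lost, which is what gives the non-queuing
SIGUSR1 its queuing semantics. A single-threaded simulation of the
accounting follows, with plain variables standing in for struct
pthread_atomic and the signal delivery; the names are stand-ins.

#include <stdio.h>

static long count = 0;     /* stand-in for p_resume_count */
static int signalled = 0;  /* whether a restart signal was actually sent */

/* fetch-and-add returning the old value, like atomic_increment/decrement */
static long fetch_add(long *p, long d) { long old = *p; *p += d; return old; }

static void restart_old(void)
{
    if (fetch_add(&count, +1) == -1)
        signalled = 1;     /* waiter already parked: deliver the signal */
}

/* Returns 1 if the thread would block in sigsuspend(), 0 if a banked
   restart was consumed without touching the (non-queuing) signal at all. */
static int suspend_old(void)
{
    return fetch_add(&count, -1) <= 0;
}

int main(void)
{
    restart_old();                                    /* restart first... */
    printf("blocks? %d (expect 0)\n", suspend_old()); /* ...is not lost */

    printf("blocks? %d (expect 1)\n", suspend_old()); /* nothing banked */
    restart_old();
    printf("signal sent? %d (expect 1)\n", signalled);
    return 0;
}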
diff --git a/linuxthreads/queue.h b/linuxthreads/queue.h
index fa8c5d861d..f87322f84a 100644
--- a/linuxthreads/queue.h
+++ b/linuxthreads/queue.h
@@ -43,13 +43,14 @@ static inline pthread_descr dequeue(pthread_descr * q)
return th;
}
-static inline void remove_from_queue(pthread_descr * q, pthread_descr th)
+static inline int remove_from_queue(pthread_descr * q, pthread_descr th)
{
for (; *q != NULL; q = &((*q)->p_nextwaiting)) {
if (*q == th) {
*q = th->p_nextwaiting;
th->p_nextwaiting = NULL;
- return;
+ return 1;
}
}
+ return 0;
}
diff --git a/linuxthreads/restart.h b/linuxthreads/restart.h
index 749201391d..702d7d15c6 100644
--- a/linuxthreads/restart.h
+++ b/linuxthreads/restart.h
@@ -18,41 +18,10 @@
static inline void restart(pthread_descr th)
{
- kill(th->p_pid, __pthread_sig_restart);
+ __pthread_restart(th); /* see pthread.c */
}
static inline void suspend(pthread_descr self)
{
- sigset_t mask;
-
- sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
- sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
- do {
- self->p_signal = 0;
- sigsuspend(&mask); /* Wait for signal */
- } while (self->p_signal !=__pthread_sig_restart );
-}
-
-#define suspend_with_cancellation(self) \
-{ \
- sigset_t mask; \
- sigjmp_buf jmpbuf; \
- \
- sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */ \
- sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */ \
- /* No need to save the signal mask, we'll restore it ourselves */ \
- if (sigsetjmp(jmpbuf, 0) == 0) { \
- self->p_cancel_jmp = &jmpbuf; \
- if (! (self->p_canceled \
- && self->p_cancelstate == PTHREAD_CANCEL_ENABLE)) { \
- do { \
- self->p_signal = 0; \
- sigsuspend(&mask); /* Wait for a signal */ \
- } while (self->p_signal != __pthread_sig_restart); \
- } \
- self->p_cancel_jmp = NULL; \
- } else { \
- sigaddset(&mask, __pthread_sig_restart); /* Reblock the restart signal */ \
- sigprocmask(SIG_SETMASK, &mask, NULL); \
- } \
+ __pthread_suspend(self); /* see pthread.c */
}
diff --git a/linuxthreads/rwlock.c b/linuxthreads/rwlock.c
index 1fb18a3f9e..1d78b78cdf 100644
--- a/linuxthreads/rwlock.c
+++ b/linuxthreads/rwlock.c
@@ -23,8 +23,8 @@
#include <pthread.h>
#include "internals.h"
#include "queue.h"
-#include "restart.h"
#include "spinlock.h"
+#include "restart.h"
int
pthread_rwlock_init (pthread_rwlock_t *rwlock,
diff --git a/linuxthreads/semaphore.c b/linuxthreads/semaphore.c
index cfad97aced..0297b3a1e3 100644
--- a/linuxthreads/semaphore.c
+++ b/linuxthreads/semaphore.c
@@ -38,9 +38,31 @@ int __new_sem_init(sem_t *sem, int pshared, unsigned int value)
return 0;
}
+/* Function called by pthread_cancel to remove the thread from
+ waiting inside __new_sem_wait. */
+
+static int new_sem_extricate_func(void *obj, pthread_descr th)
+{
+ volatile pthread_descr self = thread_self();
+ sem_t *sem = obj;
+ int did_remove = 0;
+
+ __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+ did_remove = remove_from_queue(&sem->__sem_waiting, th);
+ __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+
+ return did_remove;
+}
+
int __new_sem_wait(sem_t * sem)
{
volatile pthread_descr self = thread_self();
+ pthread_extricate_if extr;
+ int already_canceled = 0;
+
+ /* Set up extrication interface */
+ extr.pu_object = sem;
+ extr.pu_extricate_func = new_sem_extricate_func;
__pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
if (sem->__sem_value > 0) {
@@ -48,17 +70,31 @@ int __new_sem_wait(sem_t * sem)
__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
return 0;
}
- enqueue(&sem->__sem_waiting, self);
- /* Wait for sem_post or cancellation */
+ /* Register extrication interface */
+ __pthread_set_own_extricate_if(self, &extr);
+ /* Enqueue only if not already cancelled. */
+ if (!(THREAD_GETMEM(self, p_canceled)
+ && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
+ enqueue(&sem->__sem_waiting, self);
+ else
+ already_canceled = 1;
__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
- suspend_with_cancellation(self);
- /* This is a cancellation point */
- if (THREAD_GETMEM(self, p_canceled)
+
+ if (already_canceled) {
+ __pthread_set_own_extricate_if(self, 0);
+ pthread_exit(PTHREAD_CANCELED);
+ }
+
+ /* Wait for sem_post or cancellation, or fall through if already canceled */
+ suspend(self);
+ __pthread_set_own_extricate_if(self, 0);
+
+ /* Terminate only if the wakeup came from cancellation. */
+ /* Otherwise ignore cancellation because we got the semaphore. */
+
+ if (THREAD_GETMEM(self, p_woken_by_cancel)
&& THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
- /* Remove ourselves from the waiting list if we're still on it */
- __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
- remove_from_queue(&sem->__sem_waiting, self);
- __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+ THREAD_SETMEM(self, p_woken_by_cancel, 0);
pthread_exit(PTHREAD_CANCELED);
}
/* We got the semaphore */
diff --git a/linuxthreads/spinlock.c b/linuxthreads/spinlock.c
index ce6ff9e310..b1a99d9753 100644
--- a/linuxthreads/spinlock.c
+++ b/linuxthreads/spinlock.c
@@ -40,6 +40,7 @@ void internal_function __pthread_lock(struct _pthread_fastlock * lock,
pthread_descr self)
{
long oldstatus, newstatus;
+ int spurious_wakeup_count = 0;
do {
oldstatus = lock->__status;
@@ -56,7 +57,28 @@ void internal_function __pthread_lock(struct _pthread_fastlock * lock,
}
} while(! compare_and_swap(&lock->__status, oldstatus, newstatus,
&lock->__spinlock));
- if (oldstatus != 0) suspend(self);
+
+ /* Suspend with guard against spurious wakeup.
+ This can happen in pthread_cond_timedwait_relative, when the thread
+ wakes up due to timeout and is still on the condvar queue, and then
+ locks the queue to remove itself. At that point it may still be on the
+ queue, and may be resumed by a condition signal. */
+
+ if (oldstatus != 0) {
+ for (;;) {
+ suspend(self);
+ if (self->p_nextlock != NULL) {
+ /* Count resumes that don't belong to us. */
+ spurious_wakeup_count++;
+ continue;
+ }
+ break;
+ }
+ }
+
+ /* Put back any resumes we caught that don't belong to us. */
+ while (spurious_wakeup_count--)
+ restart(self);
}
void internal_function __pthread_unlock(struct _pthread_fastlock * lock)
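A toy model of this guard, with the signal plumbing reduced to a counter of
pending restarts; in the real code the "still queued" test is
self->p_nextlock != NULL, and all names below are stand-ins.

#include <stdio.h>

/* Three restarts land on the waiter: two condvar signals that raced with
   it plus the genuine lock handoff. The guard consumes them one by one,
   counts the ones that are not the handoff, and re-posts those. */
static int pending_restarts = 3;

static int still_on_lock_queue(int wakeup_no)
{
    return wakeup_no < 2;   /* first two wakeups: lock not yet granted */
}

int main(void)
{
    int spurious = 0, wakeup = 0;

    for (;;) {
        pending_restarts--;                  /* suspend(self) eats one */
        if (still_on_lock_queue(wakeup++)) {
            spurious++;                      /* this resume wasn't for us */
            continue;
        }
        break;                               /* the lock holder woke us */
    }
    while (spurious-- > 0)
        pending_restarts++;                  /* restart(self): put it back */

    printf("condvar restarts preserved: %d (expect 2)\n", pending_restarts);
    return 0;
}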
diff --git a/linuxthreads/spinlock.h b/linuxthreads/spinlock.h
index 29f030406c..aae18a27b4 100644
--- a/linuxthreads/spinlock.h
+++ b/linuxthreads/spinlock.h
@@ -72,3 +72,31 @@ static inline int __pthread_trylock (struct _pthread_fastlock * lock)
}
#define LOCK_INITIALIZER {0, 0}
+
+/* Operations on pthread_atomic, which is defined in internals.h */
+
+static inline long atomic_increment(struct pthread_atomic *pa)
+{
+ long oldval;
+
+ do {
+ oldval = pa->p_count;
+ } while (!compare_and_swap(&pa->p_count, oldval, oldval + 1, &pa->p_spinlock));
+
+ return oldval;
+}
+
+
+static inline long atomic_decrement(struct pthread_atomic *pa)
+{
+ long oldval;
+
+ do {
+ oldval = pa->p_count;
+ } while (!compare_and_swap(&pa->p_count, oldval, oldval - 1, &pa->p_spinlock));
+
+ return oldval;
+}
+
+#define ATOMIC_INITIALIZER { 0, 0 }
+
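For comparison, the same increment can be expressed with C11 atomics, where
atomic_compare_exchange_weak plays the role of compare_and_swap; this is a
sketch for illustration, not part of the patch.

#include <stdatomic.h>
#include <stdio.h>

/* Returns the old value, like atomic_increment() above. */
static long atomic_increment_c11(_Atomic long *p)
{
    long oldval = atomic_load(p);
    /* On failure the CAS reloads oldval for us, so we just retry. */
    while (!atomic_compare_exchange_weak(p, &oldval, oldval + 1))
        ;
    return oldval;
}

int main(void)
{
    _Atomic long count = -1;   /* a parked waiter, as in the condvar path */
    printf("old value: %ld (expect -1)\n", atomic_increment_c11(&count));
    printf("new value: %ld (expect 0)\n", (long) atomic_load(&count));
    return 0;
}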