author    | Ulrich Drepper <drepper@redhat.com> | 1998-06-25 19:36:00 +0000
committer | Ulrich Drepper <drepper@redhat.com> | 1998-06-25 19:36:00 +0000
commit    | 3387a425e65b839b68bd2973f6bc5ab22315cc5d (patch)
tree      | 375713a0b865b10b9eddd9c9877ad68cf0bdc851 /linuxthreads/semaphore.c
parent    | d47aac39992cb1dd705d8c584f4d3979d7ce4602 (diff)
Finish user stack support. Change locking code to be safe in situations with different priorities.
1998-06-25 19:27 Ulrich Drepper <drepper@cygnus.com>
* attr.c: Finish user stack support. Change locking code to be safe
in situations with different priorities.
* cancel.c: Likewise.
* condvar.c: Likewise.
* internals.h: Likewise.
* join.c: Likewise.
* manager.c: Likewise.
* mutex.c: Likewise.
* pthread.c: Likewise.
* ptlongjmp.c: Likewise.
* queue.h: Likewise.
* rwlock.c: Likewise.
* semaphore.c: Likewise.
* semaphore.h: Likewise.
* signals.c: Likewise.
* spinlock.c: Likewise.
* spinlock.h: Likewise.
Patches by Xavier Leroy.
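The priority remark above hinges on how the wait queues (queue.h, listed above) order blocked threads: if waiters are kept sorted by priority and woken from the head, sem_post can never resume a low-priority waiter ahead of a higher-priority one. Below is a minimal sketch of such a priority-ordered wait queue; "struct waiter" and its fields are hypothetical stand-ins for illustration, not the real linuxthreads pthread_descr or the actual queue.h code.

#include <stddef.h>

/* Illustrative only: a singly linked wait queue kept sorted by
   descending priority, so the highest-priority waiter sits at the
   head and is dequeued first. */
struct waiter {
  struct waiter *next;
  int priority;
};

/* Insert behind all entries of higher or equal priority, so threads
   of equal priority keep FIFO order among themselves. */
static void enqueue_by_priority(struct waiter **queue, struct waiter *th)
{
  struct waiter **p = queue;
  while (*p != NULL && (*p)->priority >= th->priority)
    p = &(*p)->next;
  th->next = *p;
  *p = th;
}

/* Pop the head, i.e. the highest-priority waiter, as a post would. */
static struct waiter *dequeue_head(struct waiter **queue)
{
  struct waiter *th = *queue;
  if (th != NULL) {
    *queue = th->next;
    th->next = NULL;
  }
  return th;
}

Waking the dequeued thread explicitly, rather than letting all waiters race on a shared status word as the removed compare-and-swap code did, is what keeps the behaviour well defined when waiters run at different priorities.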
1998-06-25 Ulrich Drepper <drepper@cygnus.com>
* sysdeps/pthread/pthread.h: Make [sg]et_stacksize and
[sg]et_stackaddr prototypes always available.
* sysdeps/unix/sysv/linux/bits/posix_opt.h: Define
_POSIX_THREAD_ATTR_STACKSIZE and _POSIX_THREAD_ATTR_STACKADDR.
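The second entry makes the stack-attribute prototypes visible unconditionally and advertises them through the POSIX option macros. As a hedged illustration of what client code can then do, the sketch below sets a stack size and a caller-provided stack on an attribute object; start_with_user_stack, my_thread_func and the 64 KiB figure are placeholders, not part of the patch.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

#define USER_STACK_SIZE (64 * 1024)     /* placeholder size */

extern void *my_thread_func(void *arg); /* hypothetical thread body */

int start_with_user_stack(pthread_t *tid)
{
  pthread_attr_t attr;
  int err;

  if ((err = pthread_attr_init(&attr)) != 0)
    return err;
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  if ((err = pthread_attr_setstacksize(&attr, USER_STACK_SIZE)) != 0)
    return err;
#endif
#ifdef _POSIX_THREAD_ATTR_STACKADDR
  {
    /* Caller-supplied stack.  The interpretation of the address was
       implementation-defined, which is why POSIX later replaced this
       interface with pthread_attr_setstack(). */
    void *stack = malloc(USER_STACK_SIZE);
    if (stack == NULL)
      return ENOMEM;
    if ((err = pthread_attr_setstackaddr(&attr, stack)) != 0)
      return err;
  }
#endif
  return pthread_create(tid, &attr, my_thread_func, NULL);
}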
Diffstat (limited to 'linuxthreads/semaphore.c')
-rw-r--r-- | linuxthreads/semaphore.c | 236
1 file changed, 67 insertions(+), 169 deletions(-)
diff --git a/linuxthreads/semaphore.c b/linuxthreads/semaphore.c
index bd07439cde..7ca609f6b2 100644
--- a/linuxthreads/semaphore.c
+++ b/linuxthreads/semaphore.c
@@ -17,69 +17,13 @@
 #include "pthread.h"
 #include "semaphore.h"
 #include "internals.h"
-#include "restart.h"
-
-
-#if !defined HAS_COMPARE_AND_SWAP && !defined TEST_FOR_COMPARE_AND_SWAP
-/* If we have no atomic compare and swap, fake it using an extra spinlock. */
-
-#include "spinlock.h"
-static inline int sem_compare_and_swap(sem_t *sem, long oldval, long newval)
-{
-  int ret;
-  acquire(&sem->sem_spinlock);
-  ret = (sem->sem_status == oldval);
-  if (ret) sem->sem_status = newval;
-  release(&sem->sem_spinlock);
-  return ret;
-}
-
-#elif defined TEST_FOR_COMPARE_AND_SWAP
-
-#include "spinlock.h"
-static int has_compare_and_swap = -1; /* to be determined at run-time */
-
-static inline int sem_compare_and_swap(sem_t *sem, long oldval, long newval)
-{
-  int ret;
-
-  if (has_compare_and_swap == 1)
-    return __compare_and_swap(&sem->sem_status, oldval, newval);
-
-  acquire(&sem->sem_spinlock);
-  ret = (sem->sem_status == oldval);
-  if (ret) sem->sem_status = newval;
-  release(&sem->sem_spinlock);
-  return ret;
-}
-
-#else
-/* But if we do have an atomic compare and swap, use it! */
-
-static inline int sem_compare_and_swap(sem_t *sem, long oldval, long newval)
-{
-  return __compare_and_swap(&sem->sem_status, oldval, newval);
-}
-
-#endif
-
-
-/* The state of a semaphore is represented by a long int encoding
-   either the semaphore count if >= 0 and no thread is waiting on it,
-   or the head of the list of threads waiting for the semaphore.
-   To distinguish the two cases, we encode the semaphore count N
-   as 2N+1, so that it has the lowest bit set.
-
-   A sequence of sem_wait operations on a semaphore initialized to N
-   result in the following successive states:
-     2N+1, 2N-1, ..., 3, 1, &first_waiting_thread, &second_waiting_thread, ...
-*/
-
-static void sem_restart_list(pthread_descr waiting);
+#include "restart.h"
+#include "queue.h"
 
 int sem_init(sem_t *sem, int pshared, unsigned int value)
 {
-  if ((long)value > SEM_VALUE_MAX) {
+  if (value > SEM_VALUE_MAX) {
     errno = EINVAL;
     return -1;
   }
@@ -87,150 +31,104 @@ int sem_init(sem_t *sem, int pshared, unsigned int value)
     errno = ENOSYS;
     return -1;
   }
-#ifdef TEST_FOR_COMPARE_AND_SWAP
-  if (has_compare_and_swap == -1) {
-    has_compare_and_swap = compare_and_swap_is_available();
-  }
-#endif
-#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
-  sem->sem_spinlock = 0;
-#endif
-  sem->sem_status = ((long)value << 1) + 1;
+  __pthread_init_lock((struct _pthread_fastlock *) &sem->sem_lock);
+  sem->sem_value = value;
+  sem->sem_waiting = NULL;
   return 0;
 }
 
 int sem_wait(sem_t * sem)
 {
-  long oldstatus, newstatus;
-  volatile pthread_descr self = thread_self();
-  pthread_descr * th;
+  volatile pthread_descr self;
 
-  while (1) {
-    do {
-      oldstatus = sem->sem_status;
-      if ((oldstatus & 1) && (oldstatus != 1))
-        newstatus = oldstatus - 2;
-      else {
-        newstatus = (long) self;
-        self->p_nextwaiting = (pthread_descr) oldstatus;
-      }
-    }
-    while (! sem_compare_and_swap(sem, oldstatus, newstatus));
-    if (newstatus & 1)
-      /* We got the semaphore. */
-      return 0;
-    /* Wait for sem_post or cancellation */
-    suspend_with_cancellation(self);
-    /* This is a cancellation point */
-    if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE) {
-      /* Remove ourselves from the waiting list if we're still on it */
-      /* First check if we're at the head of the list. */
-      do {
-        oldstatus = sem->sem_status;
-        if (oldstatus != (long) self) break;
-        newstatus = (long) self->p_nextwaiting;
-      }
-      while (! sem_compare_and_swap(sem, oldstatus, newstatus));
-      /* Now, check if we're somewhere in the list.
-         There's a race condition with sem_post here, but it does not matter:
-         the net result is that at the time pthread_exit is called,
-         self is no longer reachable from sem->sem_status. */
-      if (oldstatus != (long) self && (oldstatus & 1) == 0) {
-        for (th = &(((pthread_descr) oldstatus)->p_nextwaiting);
-             *th != NULL && *th != (pthread_descr) 1;
-             th = &((*th)->p_nextwaiting)) {
-          if (*th == self) {
-            *th = self->p_nextwaiting;
-            break;
-          }
-        }
-      }
-      pthread_exit(PTHREAD_CANCELED);
-    }
+  __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock);
+  if (sem->sem_value > 0) {
+    sem->sem_value--;
+    __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock);
+    return 0;
+  }
+  self = thread_self();
+  enqueue(&sem->sem_waiting, self);
+  /* Wait for sem_post or cancellation */
+  __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock);
+  suspend_with_cancellation(self);
+  /* This is a cancellation point */
+  if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE) {
+    /* Remove ourselves from the waiting list if we're still on it */
+    __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock);
+    remove_from_queue(&sem->sem_waiting, self);
+    __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock);
+    pthread_exit(PTHREAD_CANCELED);
   }
+  /* We got the semaphore */
+  return 0;
 }
 
 int sem_trywait(sem_t * sem)
 {
-  long oldstatus, newstatus;
-
-  do {
-    oldstatus = sem->sem_status;
-    if ((oldstatus & 1) == 0 || (oldstatus == 1)) {
-      errno = EAGAIN;
-      return -1;
-    }
-    newstatus = oldstatus - 2;
+  int retval;
+
+  __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock);
+  if (sem->sem_value == 0) {
+    errno = EAGAIN;
+    retval = -1;
+  } else {
+    sem->sem_value--;
+    retval = 0;
   }
-  while (! sem_compare_and_swap(sem, oldstatus, newstatus));
-  return 0;
+  __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock);
+  return retval;
 }
 
 int sem_post(sem_t * sem)
 {
-  long oldstatus, newstatus;
-
-  do {
-    oldstatus = sem->sem_status;
-    if ((oldstatus & 1) == 0)
-      newstatus = 3;
-    else {
-      if (oldstatus >= SEM_VALUE_MAX) {
+  pthread_descr self = thread_self();
+  pthread_descr th;
+  struct pthread_request request;
+
+  if (self->p_in_sighandler == NULL) {
+    __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock);
+    if (sem->sem_waiting == NULL) {
+      if (sem->sem_value >= SEM_VALUE_MAX) {
         /* Overflow */
         errno = ERANGE;
+        __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock);
+        return -1;
+      }
+      sem->sem_value++;
+      __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock);
+    } else {
+      th = dequeue(&sem->sem_waiting);
+      __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock);
+      restart(th);
+    }
+  } else {
+    /* If we're in signal handler, delegate post operation to
+       the thread manager. */
+    if (__pthread_manager_request < 0) {
+      if (__pthread_initialize_manager() < 0) {
+        errno = EAGAIN;
        return -1;
      }
-      newstatus = oldstatus + 2;
    }
+    request.req_kind = REQ_POST;
+    request.req_args.post = sem;
+    __libc_write(__pthread_manager_request,
+                 (char *) &request, sizeof(request));
  }
-  while (! sem_compare_and_swap(sem, oldstatus, newstatus));
-  if ((oldstatus & 1) == 0)
-    sem_restart_list((pthread_descr) oldstatus);
   return 0;
 }
 
 int sem_getvalue(sem_t * sem, int * sval)
 {
-  long status = sem->sem_status;
-  if (status & 1)
-    *sval = (int)((unsigned long) status >> 1);
-  else
-    *sval = 0;
+  *sval = sem->sem_value;
   return 0;
 }
 
 int sem_destroy(sem_t * sem)
 {
-  if ((sem->sem_status & 1) == 0) {
+  if (sem->sem_waiting != NULL) {
     errno = EBUSY;
     return -1;
   }
   return 0;
 }
-
-/* Auxiliary function for restarting all threads on a waiting list,
-   in priority order. */
-
-static void sem_restart_list(pthread_descr waiting)
-{
-  pthread_descr th, towake, *p;
-
-  /* Sort list of waiting threads by decreasing priority (insertion sort) */
-  towake = NULL;
-  while (waiting != (pthread_descr) 1) {
-    th = waiting;
-    waiting = waiting->p_nextwaiting;
-    p = &towake;
-    while (*p != NULL && th->p_priority < (*p)->p_priority)
-      p = &((*p)->p_nextwaiting);
-    th->p_nextwaiting = *p;
-    *p = th;
-  }
-  /* Wake up threads in priority order */
-  while (towake != NULL) {
-    th = towake;
-    towake = towake->p_nextwaiting;
-    th->p_nextwaiting = NULL;
-    restart(th);
-  }
-}
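For reference, a small self-contained program exercising the interface the rewritten semaphore.c implements; this is plain POSIX usage (sem_init with pshared == 0, sem_wait, sem_post, sem_getvalue, sem_destroy), not part of the patch.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t sem;

static void *worker(void *arg)
{
  (void) arg;
  sem_wait(&sem);                 /* blocks until the main thread posts */
  puts("worker: got the semaphore");
  return NULL;
}

int main(void)
{
  pthread_t th;
  int val;

  if (sem_init(&sem, 0, 0) != 0)  /* pshared == 0: process-private */
    return 1;
  pthread_create(&th, NULL, worker, NULL);
  sem_post(&sem);                 /* wakes the worker, or increments the count */
  pthread_join(th, NULL);
  sem_getvalue(&sem, &val);
  printf("value after the wait: %d\n", val);
  sem_destroy(&sem);              /* only succeeds when no waiters are queued */
  return 0;
}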