author    | Ulrich Drepper <drepper@redhat.com> | 2000-06-26 08:01:33 +0000
committer | Ulrich Drepper <drepper@redhat.com> | 2000-06-26 08:01:33 +0000
commit    | c49ebf7645b828a4a82b69791dc23e19b7264e21 (patch)
tree      | 235fa4a4995064709979e7b7b4faec8568efd928 /linuxthreads
parent    | d82e4c7bb231c9e0f835bd46467563ac3b56cebe (diff)
download  | glibc-c49ebf7645b828a4a82b69791dc23e19b7264e21.tar
          | glibc-c49ebf7645b828a4a82b69791dc23e19b7264e21.tar.gz
          | glibc-c49ebf7645b828a4a82b69791dc23e19b7264e21.tar.bz2
          | glibc-c49ebf7645b828a4a82b69791dc23e19b7264e21.zip
Update.
2000-06-26 Ulrich Drepper <drepper@redhat.com>
* Makefile (tests): Add ex11. Add rules to build it.
* Examples/ex11.c: New file.
* rwlock.c: Fix complete braindamaged previous try to implement
timedout functions.
* spinlock.c: Pretty print.
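
For context (not part of the commit): the functions being fixed here, pthread_rwlock_timedrdlock and pthread_rwlock_timedwrlock, take an absolute CLOCK_REALTIME deadline and return ETIMEDOUT once it passes. The sketch below shows the retry pattern the new Examples/ex11.c test exercises from application code; the helper name timed_wrlock_retry and the tv_nsec normalization are illustrative additions, and ex11.c itself uses the PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP initializer rather than the plain one.

  #include <errno.h>
  #include <pthread.h>
  #include <time.h>

  static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

  /* Try to take the write lock, waiting at most timeout_ns (< 1e9) per
     attempt; keep retrying on ETIMEDOUT.  Returns 0 or an errno value.  */
  static int
  timed_wrlock_retry (long timeout_ns)
  {
    struct timespec ts;
    int err;

    do
      {
        /* The deadline is absolute, so rebuild it before every attempt.  */
        clock_gettime (CLOCK_REALTIME, &ts);
        ts.tv_nsec += timeout_ns;
        if (ts.tv_nsec >= 1000000000)
          {
            ts.tv_nsec -= 1000000000;
            ++ts.tv_sec;
          }
        err = pthread_rwlock_timedwrlock (&lock, &ts);
      }
    while (err == ETIMEDOUT);

    return err;
  }
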
Diffstat (limited to 'linuxthreads')
-rw-r--r-- | linuxthreads/ChangeLog       |   9
-rw-r--r-- | linuxthreads/Examples/ex11.c | 152
-rw-r--r-- | linuxthreads/Makefile        |   3
-rw-r--r-- | linuxthreads/rwlock.c        | 136
-rw-r--r-- | linuxthreads/spinlock.c      |  14
5 files changed, 278 insertions, 36 deletions
diff --git a/linuxthreads/ChangeLog b/linuxthreads/ChangeLog
index 7cb6477c62..7ead5c3fcb 100644
--- a/linuxthreads/ChangeLog
+++ b/linuxthreads/ChangeLog
@@ -1,3 +1,12 @@
+2000-06-26  Ulrich Drepper  <drepper@redhat.com>
+
+	* Makefile (tests): Add ex11.  Add rules to build it.
+	* Examples/ex11.c: New file.
+	* rwlock.c: Fix complete braindamaged previous try to implement
+	timedout functions.
+
+	* spinlock.c: Pretty print.
+
 2000-06-25  Ulrich Drepper  <drepper@redhat.com>
 
 	* Makefile (tests): Add ex10.  Add rules to build it.
diff --git a/linuxthreads/Examples/ex11.c b/linuxthreads/Examples/ex11.c
new file mode 100644
index 0000000000..fb09d64561
--- /dev/null
+++ b/linuxthreads/Examples/ex11.c
@@ -0,0 +1,152 @@
+/* Test program for timedout read/write lock functions.
+   Copyright (C) 2000 Free Software Foundation, Inc.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2000.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Library General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Library General Public License for more details.
+
+   You should have received a copy of the GNU Library General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If not,
+   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+#include <errno.h>
+#include <error.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <unistd.h>
+
+
+#define NWRITERS 15
+#define WRITETRIES 10
+#define NREADERS 15
+#define READTRIES 15
+
+#define TIMEOUT 1000000
+#define DELAY 1000000
+
+static pthread_rwlock_t lock = PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP;
+
+
+static void *
+writer_thread (void *nr)
+{
+  struct timespec ts;
+  struct timespec delay;
+  int n;
+
+  ts.tv_sec = 0;
+  ts.tv_nsec = TIMEOUT;
+
+  delay.tv_sec = 0;
+  delay.tv_nsec = DELAY;
+
+  for (n = 0; n < WRITETRIES; ++n)
+    {
+      do
+        {
+          clock_gettime (CLOCK_REALTIME, &ts);
+
+          ts.tv_nsec += 2 * TIMEOUT;
+
+          printf ("writer thread %ld tries again\n", (long int) nr);
+        }
+      //while (pthread_rwlock_wrlock (&lock), 0);
+      while (pthread_rwlock_timedwrlock (&lock, &ts) == ETIMEDOUT);
+
+      printf ("writer thread %ld succeeded\n", (long int) nr);
+
+      nanosleep (&delay, NULL);
+
+      pthread_rwlock_unlock (&lock);
+
+      printf ("writer thread %ld released\n", (long int) nr);
+    }
+
+  return NULL;
+}
+
+
+static void *
+reader_thread (void *nr)
+{
+  struct timespec ts;
+  struct timespec delay;
+  int n;
+
+  delay.tv_sec = 0;
+  delay.tv_nsec = DELAY;
+
+  for (n = 0; n < READTRIES; ++n)
+    {
+      do
+        {
+          clock_gettime (CLOCK_REALTIME, &ts);
+
+          ts.tv_nsec += TIMEOUT;
+
+          printf ("reader thread %ld tries again\n", (long int) nr);
+        }
+      //while (pthread_rwlock_rdlock (&lock), 0);
+      while (pthread_rwlock_timedrdlock (&lock, &ts) == ETIMEDOUT);
+
+      printf ("reader thread %ld succeeded\n", (long int) nr);
+
+      nanosleep (&delay, NULL);
+
+      pthread_rwlock_unlock (&lock);
+
+      printf ("reader thread %ld released\n", (long int) nr);
+    }
+
+  return NULL;
+}
+
+
+int
+main (void)
+{
+  pthread_t thwr[NWRITERS];
+  pthread_t thrd[NREADERS];
+  int n;
+  void *res;
+
+  /* Make standard error the same as standard output.  */
+  dup2 (1, 2);
+
+  /* Make sure we see all message, even those on stdout.  */
+  setvbuf (stdout, NULL, _IONBF, 0);
+
+  for (n = 0; n < NWRITERS; ++n)
+    {
+      int err = pthread_create (&thwr[n], NULL, writer_thread, (void *) n);
+
+      if (err != 0)
+        error (EXIT_FAILURE, err, "cannot create writer thread");
+    }
+
+  for (n = 0; n < NREADERS; ++n)
+    {
+      int err = pthread_create (&thrd[n], NULL, reader_thread, (void *) n);
+
+      if (err != 0)
+        error (EXIT_FAILURE, err, "cannot create reader thread");
+    }
+
+  /* Wait for all the threads.  */
+  for (n = 0; n < NWRITERS; ++n)
+    pthread_join (thwr[n], &res);
+  for (n = 0; n < NREADERS; ++n)
+    pthread_join (thrd[n], &res);
+
+  return 0;
+}
diff --git a/linuxthreads/Makefile b/linuxthreads/Makefile
index 5c36bd22eb..0bdaaf291c 100644
--- a/linuxthreads/Makefile
+++ b/linuxthreads/Makefile
@@ -38,7 +38,7 @@ libpthread-routines := attr cancel condvar join manager mutex ptfork \
                        oldsemaphore events getcpuclockid pspinlock barrier
 
 vpath %.c Examples
 
-tests = ex1 ex2 ex3 ex4 ex5 ex6 ex7 ex8 ex9 ex10 joinrace
+tests = ex1 ex2 ex3 ex4 ex5 ex6 ex7 ex8 ex9 ex10 ex11 joinrace
 
 include ../Rules
 
@@ -72,4 +72,5 @@ $(objpfx)ex7: $(libpthread)
 $(objpfx)ex8: $(libpthread)
 $(objpfx)ex9: $(libpthread)
 $(objpfx)ex10: $(libpthread) $(librt)
+$(objpfx)ex11: $(libpthread) $(librt)
 $(objpfx)joinrace: $(libpthread)
diff --git a/linuxthreads/rwlock.c b/linuxthreads/rwlock.c
index 6ee5b62247..2bcdf97de0 100644
--- a/linuxthreads/rwlock.c
+++ b/linuxthreads/rwlock.c
@@ -27,6 +27,35 @@
 #include "spinlock.h"
 #include "restart.h"
 
+/* Function called by pthread_cancel to remove the thread from
+   waiting inside pthread_rwlock_timedrdlock or pthread_rwlock_timedwrlock. */
+
+static int rwlock_rd_extricate_func(void *obj, pthread_descr th)
+{
+  volatile pthread_descr self = thread_self();
+  pthread_rwlock_t *rwlock = obj;
+  int did_remove = 0;
+
+  __pthread_lock((struct _pthread_fastlock *) &rwlock->__rw_lock, self);
+  did_remove = remove_from_queue(&rwlock->__rw_read_waiting, th);
+  __pthread_unlock((struct _pthread_fastlock *) &rwlock->__rw_lock);
+
+  return did_remove;
+}
+
+static int rwlock_wr_extricate_func(void *obj, pthread_descr th)
+{
+  volatile pthread_descr self = thread_self();
+  pthread_rwlock_t *rwlock = obj;
+  int did_remove = 0;
+
+  __pthread_lock((struct _pthread_fastlock *) &rwlock->__rw_lock, self);
+  did_remove = remove_from_queue(&rwlock->__rw_write_waiting, th);
+  __pthread_unlock((struct _pthread_fastlock *) &rwlock->__rw_lock);
+
+  return did_remove;
+}
+
 /*
  * Check whether the calling thread already owns one or more read locks on the
  * specified lock.  If so, return a pointer to the read lock info structure
@@ -214,10 +243,10 @@ __pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
   int readers;
   _pthread_descr writer;
 
-  __pthread_alt_lock (&rwlock->__rw_lock, NULL);
+  __pthread_lock (&rwlock->__rw_lock, NULL);
   readers = rwlock->__rw_readers;
   writer = rwlock->__rw_writer;
-  __pthread_alt_unlock (&rwlock->__rw_lock);
+  __pthread_unlock (&rwlock->__rw_lock);
 
   if (readers > 0 || writer != NULL)
     return EBUSY;
@@ -241,18 +270,18 @@ __pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
 
   for (;;)
     {
-      __pthread_alt_lock (&rwlock->__rw_lock, self);
+      __pthread_lock (&rwlock->__rw_lock, self);
       if (rwlock_can_rdlock(rwlock, have_lock_already))
        break;
 
       enqueue (&rwlock->__rw_read_waiting, self);
-      __pthread_alt_unlock (&rwlock->__rw_lock);
+      __pthread_unlock (&rwlock->__rw_lock);
       suspend (self); /* This is not a cancellation point */
     }
 
   ++rwlock->__rw_readers;
-  __pthread_alt_unlock (&rwlock->__rw_lock);
+  __pthread_unlock (&rwlock->__rw_lock);
 
   if (have_lock_already || out_of_mem)
     {
@@ -273,6 +302,7 @@ __pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
   pthread_descr self = NULL;
   pthread_readlock_info *existing;
   int out_of_mem, have_lock_already;
+  pthread_extricate_if extr;
 
   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
     return EINVAL;
@@ -283,21 +313,46 @@ __pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
   if (self == NULL)
     self = thread_self ();
 
+  /* Set up extrication interface */
+  extr.pu_object = rwlock;
+  extr.pu_extricate_func = rwlock_rd_extricate_func;
+
+  /* Register extrication interface */
+  __pthread_set_own_extricate_if (self, &extr);
+
   for (;;)
     {
-      if (__pthread_alt_timedlock (&rwlock->__rw_lock, self, abstime) == 0)
-       return ETIMEDOUT;
+      __pthread_lock (&rwlock->__rw_lock, self);
 
       if (rwlock_can_rdlock(rwlock, have_lock_already))
       break;
 
       enqueue (&rwlock->__rw_read_waiting, self);
-      __pthread_alt_unlock (&rwlock->__rw_lock);
-      suspend (self); /* This is not a cancellation point */
+      __pthread_unlock (&rwlock->__rw_lock);
+      /* This is not a cancellation point */
+      if (timedsuspend (self, abstime) == 0)
+        {
+          int was_on_queue;
+
+          __pthread_lock (&rwlock->__rw_lock, self);
+          was_on_queue = remove_from_queue (&rwlock->__rw_read_waiting, self);
+          __pthread_unlock (&rwlock->__rw_lock);
+
+          if (was_on_queue)
+            {
+              __pthread_set_own_extricate_if (self, 0);
+              return ETIMEDOUT;
+            }
+
+          /* Eat the outstanding restart() from the signaller */
+          suspend (self);
+        }
     }
 
+  __pthread_set_own_extricate_if (self, 0);
+
   ++rwlock->__rw_readers;
-  __pthread_alt_unlock (&rwlock->__rw_lock);
+  __pthread_unlock (&rwlock->__rw_lock);
 
   if (have_lock_already || out_of_mem)
     {
@@ -322,7 +377,7 @@ __pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
   have_lock_already = rwlock_have_already(&self, rwlock,
      &existing, &out_of_mem);
 
-  __pthread_alt_lock (&rwlock->__rw_lock, self);
+  __pthread_lock (&rwlock->__rw_lock, self);
 
   /* 0 is passed to here instead of have_lock_already.
      This is to meet Single Unix Spec requirements:
@@ -336,7 +391,7 @@ __pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
       retval = 0;
     }
 
-  __pthread_alt_unlock (&rwlock->__rw_lock);
+  __pthread_unlock (&rwlock->__rw_lock);
 
   if (retval == 0)
     {
@@ -361,17 +416,17 @@ __pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
 
   while(1)
     {
-      __pthread_alt_lock (&rwlock->__rw_lock, self);
+      __pthread_lock (&rwlock->__rw_lock, self);
       if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
        {
          rwlock->__rw_writer = self;
-         __pthread_alt_unlock (&rwlock->__rw_lock);
+         __pthread_unlock (&rwlock->__rw_lock);
          return 0;
        }
 
       /* Suspend ourselves, then try again */
       enqueue (&rwlock->__rw_write_waiting, self);
-      __pthread_alt_unlock (&rwlock->__rw_lock);
+      __pthread_unlock (&rwlock->__rw_lock);
       suspend (self); /* This is not a cancellation point */
     }
 }
@@ -383,28 +438,53 @@ __pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
                              const struct timespec *abstime)
 {
   pthread_descr self;
+  pthread_extricate_if extr;
 
   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
     return EINVAL;
 
   self = thread_self ();
 
+  /* Set up extrication interface */
+  extr.pu_object = rwlock;
+  extr.pu_extricate_func = rwlock_wr_extricate_func;
+
+  /* Register extrication interface */
+  __pthread_set_own_extricate_if (self, &extr);
+
   while(1)
     {
-      if (__pthread_alt_timedlock (&rwlock->__rw_lock, self, abstime) == 0)
-       return ETIMEDOUT;
+      __pthread_lock (&rwlock->__rw_lock, self);
 
       if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
        {
         rwlock->__rw_writer = self;
-        __pthread_alt_unlock (&rwlock->__rw_lock);
+        __pthread_set_own_extricate_if (self, 0);
+        __pthread_unlock (&rwlock->__rw_lock);
         return 0;
        }
 
       /* Suspend ourselves, then try again */
       enqueue (&rwlock->__rw_write_waiting, self);
-      __pthread_alt_unlock (&rwlock->__rw_lock);
-      suspend (self); /* This is not a cancellation point */
+      __pthread_unlock (&rwlock->__rw_lock);
+      /* This is not a cancellation point */
+      if (timedsuspend (self, abstime) == 0)
+        {
+          int was_on_queue;
+
+          __pthread_lock (&rwlock->__rw_lock, self);
+          was_on_queue = remove_from_queue (&rwlock->__rw_write_waiting, self);
+          __pthread_unlock (&rwlock->__rw_lock);
+
+          if (was_on_queue)
+            {
+              __pthread_set_own_extricate_if (self, 0);
+              return ETIMEDOUT;
+            }
+
+          /* Eat the outstanding restart() from the signaller */
+          suspend (self);
+        }
     }
 }
 strong_alias (__pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock)
@@ -415,13 +495,13 @@ __pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
 {
   int result = EBUSY;
 
-  __pthread_alt_lock (&rwlock->__rw_lock, NULL);
+  __pthread_lock (&rwlock->__rw_lock, NULL);
   if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
     {
       rwlock->__rw_writer = thread_self ();
       result = 0;
     }
-  __pthread_alt_unlock (&rwlock->__rw_lock);
+  __pthread_unlock (&rwlock->__rw_lock);
 
   return result;
 }
@@ -434,13 +514,13 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
   pthread_descr torestart;
   pthread_descr th;
 
-  __pthread_alt_lock (&rwlock->__rw_lock, NULL);
+  __pthread_lock (&rwlock->__rw_lock, NULL);
   if (rwlock->__rw_writer != NULL)
     {
       /* Unlocking a write lock.  */
       if (rwlock->__rw_writer != thread_self ())
        {
-         __pthread_alt_unlock (&rwlock->__rw_lock);
+         __pthread_unlock (&rwlock->__rw_lock);
         return EPERM;
        }
       rwlock->__rw_writer = NULL;
@@ -452,14 +532,14 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
          /* Restart all waiting readers.  */
          torestart = rwlock->__rw_read_waiting;
          rwlock->__rw_read_waiting = NULL;
-         __pthread_alt_unlock (&rwlock->__rw_lock);
+         __pthread_unlock (&rwlock->__rw_lock);
          while ((th = dequeue (&torestart)) != NULL)
            restart (th);
        }
       else
        {
          /* Restart one waiting writer.  */
-         __pthread_alt_unlock (&rwlock->__rw_lock);
+         __pthread_unlock (&rwlock->__rw_lock);
          restart (th);
        }
     }
@@ -468,7 +548,7 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
       /* Unlocking a read lock.  */
       if (rwlock->__rw_readers == 0)
        {
-         __pthread_alt_unlock (&rwlock->__rw_lock);
+         __pthread_unlock (&rwlock->__rw_lock);
         return EPERM;
       }
@@ -479,7 +559,7 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
       else
       th = NULL;
 
-      __pthread_alt_unlock (&rwlock->__rw_lock);
+      __pthread_unlock (&rwlock->__rw_lock);
 
       if (th != NULL)
       restart (th);
diff --git a/linuxthreads/spinlock.c b/linuxthreads/spinlock.c
index 5cd772602c..c91a7cfa84 100644
--- a/linuxthreads/spinlock.c
+++ b/linuxthreads/spinlock.c
@@ -149,7 +149,7 @@ again:
   return 0;
 }
 
-/* 
+/*
  * Alternate fastlocks do not queue threads directly. Instead, they queue
  * these wait queue node structures. When a timed wait wakes up due to
  * a timeout, it can leave its wait node in the queue (because there
@@ -191,7 +191,7 @@ static struct wait_node *wait_node_alloc(void)
   return (struct wait_node *) oldvalue;
 }
 
-/* Return a node to the head of the free list using an atomic 
+/* Return a node to the head of the free list using an atomic
    operation. */
 
 static void wait_node_free(struct wait_node *wn)
@@ -210,7 +210,7 @@ static void wait_node_free(struct wait_node *wn)
 /* Remove a wait node from the specified queue.  It is assumed
    that the removal takes place concurrently with only atomic insertions at the
    head of the queue. */
-   
+
 static void wait_node_dequeue(struct wait_node **pp_head,
                              struct wait_node **pp_node,
                              struct wait_node *p_node,
@@ -281,7 +281,7 @@ int __pthread_alt_timedlock(struct _pthread_fastlock * lock,
 {
   struct wait_node *p_wait_node = wait_node_alloc();
   long oldstatus, newstatus;
-  
+
   /* Out of memory, just give up and do ordinary lock. */
   if (p_wait_node == 0) {
     __pthread_alt_lock(lock, self);
@@ -342,13 +342,13 @@ void __pthread_alt_unlock(struct _pthread_fastlock *lock)
     oldstatus = lock->__status;
 
     if (oldstatus == 0 || oldstatus == 1) {
-      if (compare_and_swap_with_release_semantics (&lock->__status,
-          oldstatus, 0, &lock->__spinlock))
+      if (compare_and_swap_with_release_semantics (&lock->__status, oldstatus,
+          0, &lock->__spinlock))
       return;
       else
       continue;
     }
-    
+
     /* Process the entire queue of wait nodes. Remove all abandoned
        wait nodes and put them into the global free queue, and
        remember the one unabandoned node which refers to the thread