author     Ulrich Drepper <drepper@redhat.com>   1999-07-09 14:34:22 +0000
committer  Ulrich Drepper <drepper@redhat.com>   1999-07-09 14:34:22 +0000
commit     6a1db4ffb67ccb8c3861bb398bff5a033f2d7793 (patch)
tree       b185c6ef1324c87cf9917efbcd962c796f78d426
parent     56ad7b2ceccc199811378d9033a8fbb7bcc1cc4c (diff)
Update.
	* elf/rtld.c: Split _dl_start in two pieces to prevent GOT usage
	before the relocation happened.
	Patch by Franz Sirl <Franz.Sirl-kernel@lauterbach.com>.
-rw-r--r--  ChangeLog                 |  4
-rw-r--r--  elf/rtld.c                | 31
-rw-r--r--  linuxthreads/ChangeLog    |  8
-rw-r--r--  linuxthreads/internals.h  |  1
-rw-r--r--  linuxthreads/pthread.c    |  2
-rw-r--r--  linuxthreads/spinlock.c   | 16
6 files changed, 48 insertions, 14 deletions
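
Background for the rtld.c change below: until ELF_DYNAMIC_RELOCATE has applied the dynamic linker's own relocations, its GOT entries still hold unrelocated link-time values, so any GOT-relative access the compiler schedules early in _dl_start can read garbage or crash. Splitting the function gives the compiler a call boundary it cannot move such accesses across. A minimal sketch of the pattern, with made-up names standing in for ELF_DYNAMIC_RELOCATE and _dl_start_final (the real code must additionally keep the second half from being inlined, which the patch does with an alloca call; see the rtld.c diff):

struct fake_link_map { unsigned long l_addr; };

/* Stand-in for ELF_DYNAMIC_RELOCATE: applies the dynamic linker's own
   relocations so that GOT entries become usable.  */
static void
bootstrap_relocate (struct fake_link_map *map)
{
  map->l_addr = 0;              /* placeholder for the real relocation work */
}

/* Second half of startup, in the spirit of _dl_start_final.  Because it is
   a separate function, the compiler cannot hoist the GOT-dependent accesses
   it needs up into start_entry, i.e. before bootstrap_relocate has run.  */
static unsigned long
startup_final (void *arg, struct fake_link_map *map)
{
  (void) arg;
  return map->l_addr;
}

/* First half, in the spirit of _dl_start: only code that is safe before
   relocation may appear here.  */
unsigned long
start_entry (void *arg)
{
  struct fake_link_map bootstrap_map;

  bootstrap_relocate (&bootstrap_map);
  return startup_final (arg, &bootstrap_map);
}
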
diff --git a/ChangeLog b/ChangeLog
index 37b8ff2482..f4dce34eb6 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,9 @@
1999-07-09 Ulrich Drepper <drepper@cygnus.com>
+ * elf/rtld.c: Split _dl_start in two pieces to prevent GOT usage
+ before the relocation happened.
+ Patch by Franz Sirl <Franz.Sirl-kernel@lauterbach.com>.
+
* Versions.def (libnsl): Add GLIBC_2.2.
* nis/Versions (libnsl) [GLIBC_2.2]: Add xdr_ypall.
diff --git a/elf/rtld.c b/elf/rtld.c
index 064cd20dd3..4146c0e9bf 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -129,6 +129,9 @@ static hp_timing_t relocate_time;
static hp_timing_t load_time;
extern unsigned long int _dl_num_relocations; /* in dl-lookup.c */
+static ElfW(Addr) _dl_start_final (void *arg, struct link_map *bootstrap_map_p,
+ hp_timing_t start_time);
+
#ifdef RTLD_START
RTLD_START
#else
@@ -140,7 +143,6 @@ _dl_start (void *arg)
{
struct link_map bootstrap_map;
hp_timing_t start_time;
- ElfW(Addr) start_addr;
/* This #define produces dynamic linking inline functions for
bootstrap relocation instead of general-purpose relocation. */
@@ -175,7 +177,22 @@ _dl_start (void *arg)
/* Now life is sane; we can call functions and access global data.
Set up to use the operating system facilities, and find out from
the operating system's program loader where to find the program
- header table in core. */
+ header table in core. Put the rest of _dl_start into a separate
+ function, that way the compiler cannot put accesses to the GOT
+ before ELF_DYNAMIC_RELOCATE. */
+ return _dl_start_final (arg, &bootstrap_map, start_time);
+}
+
+
+static ElfW(Addr)
+_dl_start_final (void *arg, struct link_map *bootstrap_map_p,
+ hp_timing_t start_time)
+{
+ /* The use of `alloca' here looks ridiculous but it helps. The goal
+ is to avoid the function from being inlined. There is no official
+ way to do this so we use this trick. gcc never inlines functions
+ which use `alloca'. */
+ ElfW(Addr) *start_addr = alloca (sizeof (ElfW(Addr)));
if (HP_TIMING_AVAIL)
{
@@ -188,10 +205,10 @@ _dl_start (void *arg)
}
/* Transfer data about ourselves to the permanent link_map structure. */
- _dl_rtld_map.l_addr = bootstrap_map.l_addr;
- _dl_rtld_map.l_ld = bootstrap_map.l_ld;
+ _dl_rtld_map.l_addr = bootstrap_map_p->l_addr;
+ _dl_rtld_map.l_ld = bootstrap_map_p->l_ld;
_dl_rtld_map.l_opencount = 1;
- memcpy (_dl_rtld_map.l_info, bootstrap_map.l_info,
+ memcpy (_dl_rtld_map.l_info, bootstrap_map_p->l_info,
sizeof _dl_rtld_map.l_info);
_dl_setup_hash (&_dl_rtld_map);
@@ -203,7 +220,7 @@ _dl_start (void *arg)
file access. It will call `dl_main' (below) to do all the real work
of the dynamic linker, and then unwind our frame and run the user
entry point on the same stack we entered on. */
- start_addr = _dl_sysdep_start (arg, &dl_main);
+ *start_addr = _dl_sysdep_start (arg, &dl_main);
if (HP_TIMING_AVAIL)
{
@@ -219,7 +236,7 @@ _dl_start (void *arg)
if (_dl_debug_statistics)
print_statistics ();
- return start_addr;
+ return *start_addr;
}
/* Now life is peachy; we can do all normal operations.
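
A note on the alloca in _dl_start_final above: it exists only to keep gcc from inlining the function back into _dl_start, which would defeat the whole split; gcc does not inline functions that call alloca, and at the time there was no portable way to request noinline. A stripped-down sketch of the trick, with hypothetical names (a modern compiler would use __attribute__ ((noinline)) instead):

#include <alloca.h>

/* Hypothetical second-stage function.  The alloca call contributes nothing
   to the result; it is only there because gcc refuses to inline a function
   that uses alloca, so this function stays behind a real call boundary.  */
static unsigned long
second_stage (unsigned long value)
{
  unsigned long *slot = alloca (sizeof (unsigned long));

  *slot = value;                /* placeholder for the real work */
  return *slot;
}

unsigned long
first_stage (unsigned long value)
{
  /* Nothing done inside second_stage can be scheduled before this call.  */
  return second_stage (value);
}
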
diff --git a/linuxthreads/ChangeLog b/linuxthreads/ChangeLog
index ab97529a83..dfb0e3d66a 100644
--- a/linuxthreads/ChangeLog
+++ b/linuxthreads/ChangeLog
@@ -1,3 +1,11 @@
+1999-06-23 Robey Pointer <robey@netscape.com>
+
+ * internals.h: Added p_nextlock entry to separate queueing for a
+ lock from queueing for a CV (sometimes a thread queues on a lock
+ to serialize removing itself from a CV queue).
+ * pthread.c: Added p_nextlock to initializers.
+ * spinlock.c: Changed to use p_nextlock instead of p_nextwaiting.
+
1999-07-09 Ulrich Drepper <drepper@cygnus.com>
* manager.c (pthread_handle_create): Free mmap region after stack
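
What the p_nextlock field introduced below is for: a thread blocked on a condition variable is chained on the CV's wait queue through p_nextwaiting, and sometimes it must then queue on an internal lock purely to serialize removing itself from that CV queue, so it can be on both queues at once. Reusing one link field for both would corrupt one of the lists. A small illustrative sketch of a descriptor with one intrusive link per queue (field and function names here are made up, not the real linuxthreads layout):

/* Illustrative only; this is not the real linuxthreads descriptor layout.  */
struct thread_descr
{
  struct thread_descr *next_waiting;  /* link while queued on a CV */
  struct thread_descr *next_lock;     /* link while queued on an internal lock */
  int id;
};

/* Push a thread onto the front of a lock's waiter list.  Because a separate
   link field is used, any condition-variable queue the thread is still
   linked on through next_waiting is left untouched.  */
void
enqueue_on_lock (struct thread_descr **lock_waiters, struct thread_descr *self)
{
  self->next_lock = *lock_waiters;
  *lock_waiters = self;
}
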
diff --git a/linuxthreads/internals.h b/linuxthreads/internals.h
index db84707312..bcbfbcf5eb 100644
--- a/linuxthreads/internals.h
+++ b/linuxthreads/internals.h
@@ -74,6 +74,7 @@ struct _pthread_descr_struct {
pthread_descr p_nextlive, p_prevlive;
/* Double chaining of active threads */
pthread_descr p_nextwaiting; /* Next element in the queue holding the thr */
+ pthread_descr p_nextlock; /* can be on a queue and waiting on a lock */
pthread_t p_tid; /* Thread identifier */
int p_pid; /* PID of Unix process */
int p_priority; /* Thread priority (== 0 if not realtime) */
diff --git a/linuxthreads/pthread.c b/linuxthreads/pthread.c
index b7cf573fe9..c94e7e6f31 100644
--- a/linuxthreads/pthread.c
+++ b/linuxthreads/pthread.c
@@ -34,6 +34,7 @@ struct _pthread_descr_struct __pthread_initial_thread = {
&__pthread_initial_thread, /* pthread_descr p_nextlive */
&__pthread_initial_thread, /* pthread_descr p_prevlive */
NULL, /* pthread_descr p_nextwaiting */
+ NULL, /* pthread_descr p_nextlock */
PTHREAD_THREADS_MAX, /* pthread_t p_tid */
0, /* int p_pid */
0, /* int p_priority */
@@ -75,6 +76,7 @@ struct _pthread_descr_struct __pthread_manager_thread = {
NULL, /* pthread_descr p_nextlive */
NULL, /* pthread_descr p_prevlive */
NULL, /* pthread_descr p_nextwaiting */
+ NULL, /* pthread_descr p_nextlock */
0, /* int p_tid */
0, /* int p_pid */
0, /* int p_priority */
diff --git a/linuxthreads/spinlock.c b/linuxthreads/spinlock.c
index c8f8f71293..ce6ff9e310 100644
--- a/linuxthreads/spinlock.c
+++ b/linuxthreads/spinlock.c
@@ -27,7 +27,7 @@
1: fastlock is taken, no thread is waiting on it
ADDR: fastlock is taken, ADDR is address of thread descriptor for
first waiting thread, other waiting threads are linked via
- their p_nextwaiting field.
+ their p_nextlock field.
The waiting list is not sorted by priority order.
Actually, we always insert at top of list (sole insertion mode
that can be performed without locking).
@@ -50,8 +50,10 @@ void internal_function __pthread_lock(struct _pthread_fastlock * lock,
self = thread_self();
newstatus = (long) self;
}
- if (self != NULL)
- THREAD_SETMEM(self, p_nextwaiting, (pthread_descr) oldstatus);
+ if (self != NULL) {
+ ASSERT(self->p_nextlock == NULL);
+ THREAD_SETMEM(self, p_nextlock, (pthread_descr) oldstatus);
+ }
} while(! compare_and_swap(&lock->__status, oldstatus, newstatus,
&lock->__spinlock));
if (oldstatus != 0) suspend(self);
@@ -83,7 +85,7 @@ again:
maxptr = ptr;
maxprio = thr->p_priority;
}
- ptr = &(thr->p_nextwaiting);
+ ptr = &(thr->p_nextlock);
thr = *ptr;
}
/* Remove max prio thread from waiting list. */
@@ -92,16 +94,16 @@ again:
to guard against concurrent lock operation */
thr = (pthread_descr) oldstatus;
if (! compare_and_swap(&lock->__status,
- oldstatus, (long)(thr->p_nextwaiting),
+ oldstatus, (long)(thr->p_nextlock),
&lock->__spinlock))
goto again;
} else {
/* No risk of concurrent access, remove max prio thread normally */
thr = *maxptr;
- *maxptr = thr->p_nextwaiting;
+ *maxptr = thr->p_nextlock;
}
/* Wake up the selected waiting thread */
- thr->p_nextwaiting = NULL;
+ thr->p_nextlock = NULL;
restart(thr);
}
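
For reference, the fastlock protocol that these spinlock.c hunks touch: __status is 0 when the lock is free, 1 when it is taken with no waiters, and otherwise the address of the first waiting thread descriptor, with further waiters now chained through p_nextlock; a new waiter pushes itself onto the head of that list with compare-and-swap and then suspends. A hedged sketch of the acquire path using C11 atomics in place of the linuxthreads compare_and_swap and suspend primitives; thread_self and suspend_self here are trivial stand-ins, and the waiter list is NULL-terminated for simplicity where the real code uses the value 1 as the terminator:

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

struct waiter
{
  struct waiter *next_lock;     /* chain of threads waiting for the lock */
};

struct fastlock
{
  /* 0: free; 1: taken, no waiters; otherwise: pointer to first waiter.  */
  _Atomic uintptr_t status;
};

/* Trivial stand-ins for the linuxthreads thread_self() and suspend()
   primitives, just to keep the sketch self-contained.  */
static struct waiter current_thread;
static struct waiter *thread_self (void) { return &current_thread; }
static void suspend_self (void) { /* the real code blocks on a signal */ }

void
fastlock_acquire (struct fastlock *lock)
{
  struct waiter *self = NULL;
  uintptr_t oldstatus, newstatus;

  do
    {
      oldstatus = atomic_load (&lock->status);
      if (oldstatus == 0)
        /* Lock is free: try to take it without queueing.  */
        newstatus = 1;
      else
        {
          /* Lock is held: push ourselves onto the head of the waiter list
             through next_lock, leaving any CV queue links alone.  */
          self = thread_self ();
          self->next_lock = oldstatus == 1 ? NULL : (struct waiter *) oldstatus;
          newstatus = (uintptr_t) self;
        }
    }
  while (!atomic_compare_exchange_weak (&lock->status, &oldstatus, newstatus));

  if (oldstatus != 0)
    suspend_self ();            /* a later unlock pops us and restarts us */
}
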