Diffstat (limited to 'elf')
-rw-r--r--  elf/dl-close.c     6
-rw-r--r--  elf/dl-deps.c    100
-rw-r--r--  elf/dl-fini.c      4
-rw-r--r--  elf/dl-lookup.c  156
-rw-r--r--  elf/dl-open.c      2
5 files changed, 179 insertions, 89 deletions
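The patch below replaces the flat struct link_map **l_reldeps array and its
separate l_reldepsact counter with a single heap-allocated object that can be
swapped with one pointer store, so add_dependency can scan relocation
dependencies without holding dl_load_lock. A minimal sketch of the layout
implied by the accesses in these hunks (the authoritative definition lives in
include/link.h, which is not part of this diff):

struct link_map_reldeps
  {
    unsigned int act;		/* Number of entries currently in use.  */
    struct link_map *list[];	/* Dependency maps; the capacity is still
				   tracked separately in l_reldepsmax.  */
  };

Keeping the count and the entries in one block is what makes the lock-free
protocol below workable: a reader that loads l_reldeps once gets a count and
a list that belong together.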
diff --git a/elf/dl-close.c b/elf/dl-close.c
index 67188bb6c1..264e13a8ee 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -203,9 +203,9 @@ _dl_close_worker (struct link_map *map)
}
/* And the same for relocation dependencies. */
if (l->l_reldeps != NULL)
- for (unsigned int j = 0; j < l->l_reldepsact; ++j)
+ for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
{
- struct link_map *jmap = l->l_reldeps[j];
+ struct link_map *jmap = l->l_reldeps->list[j];
if (jmap->l_idx != IDX_STILL_USED)
{
@@ -497,7 +497,7 @@ _dl_close_worker (struct link_map *map)
THREAD_GSCOPE_WAIT ();
/* Now we can free any queued old scopes. */
- struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
+ struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
if (fsl != NULL)
while (fsl->count > 0)
free (fsl->list[--fsl->count]);
diff --git a/elf/dl-deps.c b/elf/dl-deps.c
index 4ec984e15b..34c6024efa 100644
--- a/elf/dl-deps.c
+++ b/elf/dl-deps.c
@@ -1,5 +1,6 @@
/* Load the dependencies of a mapped object.
- Copyright (C) 1996-2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+ Copyright (C) 1996-2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +18,7 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
+#include <atomic.h>
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
@@ -465,15 +467,17 @@ _dl_map_object_deps (struct link_map *map,
{
needed[nneeded++] = NULL;
- l->l_initfini = (struct link_map **)
+ struct link_map **l_initfini = (struct link_map **)
malloc ((2 * nneeded + 1) * sizeof needed[0]);
- if (l->l_initfini == NULL)
+ if (l_initfini == NULL)
_dl_signal_error (ENOMEM, map->l_name, NULL,
N_("cannot allocate dependency list"));
- l->l_initfini[0] = l;
- memcpy (&l->l_initfini[1], needed, nneeded * sizeof needed[0]);
- memcpy (&l->l_initfini[nneeded + 1], l->l_initfini,
+ l_initfini[0] = l;
+ memcpy (&l_initfini[1], needed, nneeded * sizeof needed[0]);
+ memcpy (&l_initfini[nneeded + 1], l_initfini,
nneeded * sizeof needed[0]);
+ atomic_write_barrier ();
+ l->l_initfini = l_initfini;
}
/* If we have no auxiliary objects just go on to the next map. */
@@ -487,25 +491,26 @@ _dl_map_object_deps (struct link_map *map,
if (errno == 0 && errno_saved != 0)
__set_errno (errno_saved);
+ struct link_map **old_l_initfini = NULL;
if (map->l_initfini != NULL && map->l_type == lt_loaded)
{
/* This object was previously loaded as a dependency and we have
a separate l_initfini list. We don't need it anymore. */
assert (map->l_searchlist.r_list == NULL);
- free (map->l_initfini);
+ old_l_initfini = map->l_initfini;
}
/* Store the search list we built in the object. It will be used for
searches in the scope of this object. */
- map->l_initfini =
+ struct link_map **l_initfini =
(struct link_map **) malloc ((2 * nlist + 1)
* sizeof (struct link_map *));
- if (map->l_initfini == NULL)
+ if (l_initfini == NULL)
_dl_signal_error (ENOMEM, map->l_name, NULL,
N_("cannot allocate symbol search list"));
- map->l_searchlist.r_list = &map->l_initfini[nlist + 1];
+ map->l_searchlist.r_list = &l_initfini[nlist + 1];
map->l_searchlist.r_nlist = nlist;
for (nlist = 0, runp = known; runp; runp = runp->next)
@@ -546,10 +551,10 @@ _dl_map_object_deps (struct link_map *map,
Filters not supported with LD_TRACE_PRELINKING"));
}
- cnt = _dl_build_local_scope (map->l_initfini, l);
+ cnt = _dl_build_local_scope (l_initfini, l);
assert (cnt <= nlist);
for (j = 0; j < cnt; j++)
- map->l_initfini[j]->l_reserved = 0;
+ l_initfini[j]->l_reserved = 0;
l->l_local_scope[0] =
(struct r_scope_elem *) malloc (sizeof (struct r_scope_elem)
@@ -561,35 +566,50 @@ Filters not supported with LD_TRACE_PRELINKING"));
l->l_local_scope[0]->r_nlist = cnt;
l->l_local_scope[0]->r_list =
(struct link_map **) (l->l_local_scope[0] + 1);
- memcpy (l->l_local_scope[0]->r_list, map->l_initfini,
+ memcpy (l->l_local_scope[0]->r_list, l_initfini,
cnt * sizeof (struct link_map *));
}
}
/* Maybe we can remove some relocation dependencies now. */
assert (map->l_searchlist.r_list[0] == map);
- for (i = 0; i < map->l_reldepsact; ++i)
+ struct link_map_reldeps *l_reldeps = NULL;
+ if (map->l_reldeps != NULL)
{
- unsigned int j;
+ for (i = 1; i < nlist; ++i)
+ map->l_searchlist.r_list[i]->l_reserved = 1;
- for (j = 1; j < nlist; ++j)
- if (map->l_searchlist.r_list[j] == map->l_reldeps[i])
+ struct link_map **list = &map->l_reldeps->list[0];
+ for (i = 0; i < map->l_reldeps->act; ++i)
+ if (list[i]->l_reserved)
{
- /* A direct or transitive dependency is also on the list
- of relocation dependencies. Remove the latter. */
- for (j = i + 1; j < map->l_reldepsact; ++j)
- map->l_reldeps[j - 1] = map->l_reldeps[j];
-
- --map->l_reldepsact;
-
- /* Account for the '++i' performed by the 'for'. */
- --i;
- break;
+ /* Need to allocate new array of relocation dependencies. */
+ l_reldeps = malloc (sizeof (*l_reldeps)
+ + map->l_reldepsmax
+ * sizeof (struct link_map *));
+ if (l_reldeps == NULL)
+ /* Bad luck, keep the reldeps duplicated between
+ map->l_reldeps->list and map->l_initfini lists. */
+ ;
+ else
+ {
+ unsigned int j = i;
+ memcpy (&l_reldeps->list[0], &list[0],
+ i * sizeof (struct link_map *));
+ for (i = i + 1; i < map->l_reldeps->act; ++i)
+ if (!list[i]->l_reserved)
+ l_reldeps->list[j++] = list[i];
+ l_reldeps->act = j;
+ }
}
+
+ for (i = 1; i < nlist; ++i)
+ map->l_searchlist.r_list[i]->l_reserved = 0;
}
/* Now determine the order in which the initialization has to happen. */
- memcpy (map->l_initfini, map->l_searchlist.r_list,
+ memcpy (l_initfini, map->l_searchlist.r_list,
nlist * sizeof (struct link_map *));
/* We can skip looking for the binary itself which is at the front
of the search list. Look through the list backward so that circular
@@ -602,7 +622,7 @@ Filters not supported with LD_TRACE_PRELINKING"));
/* Find the place in the initfini list where the map is currently
located. */
- for (j = 1; map->l_initfini[j] != l; ++j)
+ for (j = 1; l_initfini[j] != l; ++j)
;
/* Find all object for which the current one is a dependency and
@@ -611,19 +631,18 @@ Filters not supported with LD_TRACE_PRELINKING"));
{
struct link_map **runp;
- runp = map->l_initfini[k]->l_initfini;
+ runp = l_initfini[k]->l_initfini;
if (runp != NULL)
{
while (*runp != NULL)
if (__builtin_expect (*runp++ == l, 0))
{
- struct link_map *here = map->l_initfini[k];
+ struct link_map *here = l_initfini[k];
/* Move it now. */
- memmove (&map->l_initfini[j] + 1,
- &map->l_initfini[j],
+ memmove (&l_initfini[j] + 1, &l_initfini[j],
(k - j) * sizeof (struct link_map *));
- map->l_initfini[j] = here;
+ l_initfini[j] = here;
/* Don't insert further matches before the last
entry moved to the front. */
@@ -635,7 +654,18 @@ Filters not supported with LD_TRACE_PRELINKING"));
}
}
/* Terminate the list of dependencies. */
- map->l_initfini[nlist] = NULL;
+ l_initfini[nlist] = NULL;
+ atomic_write_barrier ();
+ map->l_initfini = l_initfini;
+ if (l_reldeps != NULL)
+ {
+ atomic_write_barrier ();
+ void *old_l_reldeps = map->l_reldeps;
+ map->l_reldeps = l_reldeps;
+ _dl_scope_free (old_l_reldeps);
+ }
+ if (old_l_initfini != NULL)
+ _dl_scope_free (old_l_initfini);
if (errno_reason)
_dl_signal_error (errno_reason == -1 ? 0 : errno_reason, objname,
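The dl-deps.c changes above all follow one publication pattern: build the
replacement list completely on the side, issue atomic_write_barrier (), and
only then store the pointer into the link map, so a lock-free reader in
add_dependency sees either the old list or a fully initialized new one, never
a half-written block. The displaced block goes to _dl_scope_free rather than
free, since a reader may still be traversing it. Condensed writer-side sketch
(build_list is a hypothetical stand-in for the construction code above;
dl_load_lock is held here, as it is throughout _dl_map_object_deps):

struct link_map **new_list = build_list (map, nlist); /* fully filled in */
new_list[nlist] = NULL;			/* terminate before publishing */
atomic_write_barrier ();		/* earlier stores settle first */
struct link_map **old_list = map->l_initfini;
map->l_initfini = new_list;		/* a single store publishes it */
if (old_list != NULL)
  _dl_scope_free (old_list);		/* deferred, never plain free */

Note also the order of the two publications at the end of the hunk: the new
l_initfini is stored before the new l_reldeps, the mirror image of the read
order used in dl-lookup.c below.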
diff --git a/elf/dl-fini.c b/elf/dl-fini.c
index 3cd7e7bbff..273bc3a99d 100644
--- a/elf/dl-fini.c
+++ b/elf/dl-fini.c
@@ -82,8 +82,8 @@ _dl_sort_fini (struct link_map *l, struct link_map **maps, size_t nmaps,
if (__builtin_expect (maps[k]->l_reldeps != NULL, 0))
{
- unsigned int m = maps[k]->l_reldepsact;
- struct link_map **relmaps = maps[k]->l_reldeps;
+ unsigned int m = maps[k]->l_reldeps->act;
+ struct link_map **relmaps = &maps[k]->l_reldeps->list[0];
while (m-- > 0)
{
diff --git a/elf/dl-lookup.c b/elf/dl-lookup.c
index c529007ca1..92dc7b226a 100644
--- a/elf/dl-lookup.c
+++ b/elf/dl-lookup.c
@@ -88,20 +88,50 @@ static int
internal_function
add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
{
- struct link_map **list;
struct link_map *runp;
- unsigned int act;
unsigned int i;
int result = 0;
- unsigned long long int serial;
/* Avoid self-references and references to objects which cannot be
unloaded anyway. */
if (undef_map == map)
return 0;
+ /* Avoid references to objects which cannot be unloaded anyway. */
+ assert (map->l_type == lt_loaded);
+ if ((map->l_flags_1 & DF_1_NODELETE) != 0)
+ return 0;
+
+ struct link_map_reldeps *l_reldeps
+ = atomic_forced_read (undef_map->l_reldeps);
+
+ /* Make sure l_reldeps is read before l_initfini. */
+ atomic_read_barrier ();
+
+ /* Determine whether UNDEF_MAP already has a reference to MAP. First
+ look in the normal dependencies. */
+ struct link_map **l_initfini = atomic_forced_read (undef_map->l_initfini);
+ if (l_initfini != NULL)
+ {
+ for (i = 0; l_initfini[i] != NULL; ++i)
+ if (l_initfini[i] == map)
+ return 0;
+ }
+
+ /* No normal dependency. See whether we already had to add it
+ to the special list of dynamic dependencies. */
+ unsigned int l_reldepsact = 0;
+ if (l_reldeps != NULL)
+ {
+ struct link_map **list = &l_reldeps->list[0];
+ l_reldepsact = l_reldeps->act;
+ for (i = 0; i < l_reldepsact; ++i)
+ if (list[i] == map)
+ return 0;
+ }
+
/* Save serial number of the target MAP. */
- serial = map->l_serial;
+ unsigned long long serial = map->l_serial;
/* Make sure nobody can unload the object while we are at it. */
if (__builtin_expect (flags & DL_LOOKUP_GSCOPE_LOCK, 0))
@@ -110,38 +140,52 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
here, that can result in ABBA deadlock. */
THREAD_GSCOPE_RESET_FLAG ();
__rtld_lock_lock_recursive (GL(dl_load_lock));
- THREAD_GSCOPE_SET_FLAG ();
/* While MAP value won't change, after THREAD_GSCOPE_RESET_FLAG ()
it can e.g. point to unallocated memory. So avoid the optimizer
treating the above read from MAP->l_serial as ensurance it
can safely dereference it. */
map = atomic_forced_read (map);
- }
- else
- __rtld_lock_lock_recursive (GL(dl_load_lock));
- /* From this point on it is unsafe to dereference MAP, until it
- has been found in one of the lists. */
+ /* From this point on it is unsafe to dereference MAP, until it
+ has been found in one of the lists. */
- /* Determine whether UNDEF_MAP already has a reference to MAP. First
- look in the normal dependencies. */
- if (undef_map->l_initfini != NULL)
- {
- list = undef_map->l_initfini;
+ /* Redo the l_initfini check in case undef_map's l_initfini
+ changed in the mean time. */
+ if (undef_map->l_initfini != l_initfini
+ && undef_map->l_initfini != NULL)
+ {
+ l_initfini = undef_map->l_initfini;
+ for (i = 0; l_initfini[i] != NULL; ++i)
+ if (l_initfini[i] == map)
+ goto out_check;
+ }
- for (i = 0; list[i] != NULL; ++i)
- if (list[i] == map)
- goto out_check;
+ /* Redo the l_reldeps check if undef_map's l_reldeps changed in
+ the mean time. */
+ if (undef_map->l_reldeps != NULL)
+ {
+ if (undef_map->l_reldeps != l_reldeps)
+ {
+ struct link_map **list = &undef_map->l_reldeps->list[0];
+ l_reldepsact = undef_map->l_reldeps->act;
+ for (i = 0; i < l_reldepsact; ++i)
+ if (list[i] == map)
+ goto out_check;
+ }
+ else if (undef_map->l_reldeps->act > l_reldepsact)
+ {
+ struct link_map **list
+ = &undef_map->l_reldeps->list[0];
+ i = l_reldepsact;
+ l_reldepsact = undef_map->l_reldeps->act;
+ for (; i < l_reldepsact; ++i)
+ if (list[i] == map)
+ goto out_check;
+ }
+ }
}
-
- /* No normal dependency. See whether we already had to add it
- to the special list of dynamic dependencies. */
- list = undef_map->l_reldeps;
- act = undef_map->l_reldepsact;
-
- for (i = 0; i < act; ++i)
- if (list[i] == map)
- goto out_check;
+ else
+ __rtld_lock_lock_recursive (GL(dl_load_lock));
/* The object is not yet in the dependency list. Before we add
it make sure just one more time the object we are about to
@@ -161,8 +205,8 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
if (map->l_serial != serial)
goto out_check;
- /* Avoid references to objects which cannot be unloaded anyway. */
- assert (map->l_type == lt_loaded);
+ /* Redo the NODELETE check, as when dl_load_lock wasn't held
+ yet this could have changed. */
if ((map->l_flags_1 & DF_1_NODELETE) != 0)
goto out;
@@ -177,33 +221,46 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
}
/* Add the reference now. */
- if (__builtin_expect (act >= undef_map->l_reldepsmax, 0))
+ if (__builtin_expect (l_reldepsact >= undef_map->l_reldepsmax, 0))
{
/* Allocate more memory for the dependency list. Since this
can never happen during the startup phase we can use
`realloc'. */
- void *newp;
-
- undef_map->l_reldepsmax += 5;
- newp = realloc (undef_map->l_reldeps,
- undef_map->l_reldepsmax
- * sizeof (struct link_map *));
+ struct link_map_reldeps *newp;
+ unsigned int max
+ = undef_map->l_reldepsmax ? undef_map->l_reldepsmax * 2 : 10;
- if (__builtin_expect (newp != NULL, 1))
- undef_map->l_reldeps = (struct link_map **) newp;
+ newp = malloc (sizeof (*newp) + max * sizeof (struct link_map *));
+ if (newp == NULL)
+ {
+ /* If we didn't manage to allocate memory for the list this is
+ no fatal problem. We simply make sure the referenced object
+ cannot be unloaded. This is semantically the correct
+ behavior. */
+ map->l_flags_1 |= DF_1_NODELETE;
+ goto out;
+ }
else
- /* Correct the addition. */
- undef_map->l_reldepsmax -= 5;
+ {
+ if (l_reldepsact)
+ memcpy (&newp->list[0], &undef_map->l_reldeps->list[0],
+ l_reldepsact * sizeof (struct link_map *));
+ newp->list[l_reldepsact] = map;
+ newp->act = l_reldepsact + 1;
+ atomic_write_barrier ();
+ void *old = undef_map->l_reldeps;
+ undef_map->l_reldeps = newp;
+ undef_map->l_reldepsmax = max;
+ if (old)
+ _dl_scope_free (old);
+ }
}
-
- /* If we didn't manage to allocate memory for the list this is
- no fatal mistake. We simply make sure the referenced object
- cannot be unloaded. This is semantically the correct
- behavior. */
- if (__builtin_expect (act < undef_map->l_reldepsmax, 1))
- undef_map->l_reldeps[undef_map->l_reldepsact++] = map;
else
- map->l_flags_1 |= DF_1_NODELETE;
+ {
+ undef_map->l_reldeps->list[l_reldepsact] = map;
+ atomic_write_barrier ();
+ undef_map->l_reldeps->act = l_reldepsact + 1;
+ }
/* Display information if we are debugging. */
if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
@@ -223,6 +280,9 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
/* Release the lock. */
__rtld_lock_unlock_recursive (GL(dl_load_lock));
+ if (__builtin_expect (flags & DL_LOOKUP_GSCOPE_LOCK, 0))
+ THREAD_GSCOPE_SET_FLAG ();
+
return result;
out_check:
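Two barrier pairings make the lock-free fast path added above sound. Within
one block, the writer fills the new list slot before bumping ->act, so a
reader that observes the larger count never indexes an unwritten entry.
Across the two lists, dl-deps.c stores the new l_initfini before the pruned
l_reldeps, and the reader here loads l_reldeps before l_initfini, so a
dependency pruned from the reldeps list is already visible in the initfini
list the reader scans first. Both halves appear in full in the hunks above;
condensed for contrast:

/* Writer, in-place append (dl_load_lock held).  */
undef_map->l_reldeps->list[l_reldepsact] = map;	/* slot first */
atomic_write_barrier ();
undef_map->l_reldeps->act = l_reldepsact + 1;	/* count second */

/* Reader, no lock taken.  */
struct link_map_reldeps *l_reldeps = atomic_forced_read (undef_map->l_reldeps);
atomic_read_barrier ();
struct link_map **l_initfini = atomic_forced_read (undef_map->l_initfini);

This pairing is also why the growth path switched from realloc to malloc plus
_dl_scope_free: realloc may free the old block in place while an unlocked
reader is still walking it, and doubling the capacity (from a floor of 10)
instead of adding a constant 5 keeps the number of such reallocations
logarithmic in the dependency count.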
diff --git a/elf/dl-open.c b/elf/dl-open.c
index fda3219ae2..f825aa0437 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -166,7 +166,7 @@ add_to_global (struct link_map *new)
}
int
-_dl_scope_free (struct r_scope_elem **old)
+_dl_scope_free (void *old)
{
struct dl_scope_free_list *fsl;
#define DL_SCOPE_FREE_LIST_SIZE (sizeof (fsl->list) / sizeof (fsl->list[0]))
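Widening _dl_scope_free to void * lets one deferred-free queue retire both
r_scope_elem arrays and the new link_map_reldeps blocks: the function merely
parks the pointer on GL(dl_scope_free_list), and the drain loop in the
dl-close.c hunk above frees the entries only after THREAD_GSCOPE_WAIT ()
guarantees no lock-free reader can still hold them. Sketch of the queue shape
implied by this function and that drain loop (the real definition lives in
ldsodefs.h; the field types and capacity shown are assumptions):

struct dl_scope_free_list
  {
    size_t count;	/* Queued entries; drained to 0 in dl-close.c.  */
    void *list[50];	/* Capacity assumed; the code sizes its loops via
			   DL_SCOPE_FREE_LIST_SIZE, not a literal.  */
  };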