Diffstat (limited to 'elf/dl-open.c')
-rw-r--r--  elf/dl-open.c  |  139
1 file changed, 37 insertions, 102 deletions
diff --git a/elf/dl-open.c b/elf/dl-open.c
index 32e7caac7f..c997521126 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -1,5 +1,5 @@
/* Load a shared object at runtime, relocate it, and run its initializer.
- Copyright (C) 1996-2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 1996-2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -32,7 +32,6 @@
#include <bp-sym.h>
#include <caller.h>
#include <sysdep-cancel.h>
-#include <tls.h>
#include <dl-dst.h>
@@ -98,17 +97,17 @@ add_to_global (struct link_map *new)
in a realloc() call. Therefore we allocate a completely new
array the first time we have to add something to the local scope. */
- struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
- if (ns->_ns_global_scope_alloc == 0)
+ if (GL(dl_ns)[new->l_ns]._ns_global_scope_alloc == 0)
{
/* This is the first dynamic object given global scope. */
- ns->_ns_global_scope_alloc
- = ns->_ns_main_searchlist->r_nlist + to_add + 8;
+ GL(dl_ns)[new->l_ns]._ns_global_scope_alloc
+ = GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_nlist + to_add + 8;
new_global = (struct link_map **)
- malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
+ malloc (GL(dl_ns)[new->l_ns]._ns_global_scope_alloc
+ * sizeof (struct link_map *));
if (new_global == NULL)
{
- ns->_ns_global_scope_alloc = 0;
+ GL(dl_ns)[new->l_ns]._ns_global_scope_alloc = 0;
nomem:
_dl_signal_error (ENOMEM, new->l_libname->name, NULL,
N_("cannot extend global scope"));
@@ -116,39 +115,29 @@ add_to_global (struct link_map *new)
}
/* Copy over the old entries. */
- ns->_ns_main_searchlist->r_list
- = memcpy (new_global, ns->_ns_main_searchlist->r_list,
- (ns->_ns_main_searchlist->r_nlist
+ GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list
+ = memcpy (new_global,
+ GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list,
+ (GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_nlist
* sizeof (struct link_map *)));
}
- else if (ns->_ns_main_searchlist->r_nlist + to_add
- > ns->_ns_global_scope_alloc)
+ else if (GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_nlist + to_add
+ > GL(dl_ns)[new->l_ns]._ns_global_scope_alloc)
{
/* We have to extend the existing array of link maps in the
main map. */
- struct link_map **old_global
- = GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
- size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);
-
new_global = (struct link_map **)
- malloc (new_nalloc * sizeof (struct link_map *));
+ realloc (GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list,
+ ((GL(dl_ns)[new->l_ns]._ns_global_scope_alloc + to_add + 8)
+ * sizeof (struct link_map *)));
if (new_global == NULL)
goto nomem;
- memcpy (new_global, old_global,
- ns->_ns_global_scope_alloc * sizeof (struct link_map *));
-
- ns->_ns_global_scope_alloc = new_nalloc;
- ns->_ns_main_searchlist->r_list = new_global;
-
- if (!RTLD_SINGLE_THREAD_P)
- THREAD_GSCOPE_WAIT ();
-
- free (old_global);
+ GL(dl_ns)[new->l_ns]._ns_global_scope_alloc += to_add + 8;
+ GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list = new_global;
}
/* Now add the new entries. */
- unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
{
struct link_map *map = new->l_searchlist.r_list[cnt];
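
Aside: the comment at the top of add_to_global explains the constraint both sides of the two hunks above work around. The main searchlist was allocated by ld.so's startup allocator, so it cannot be handed to the libc realloc(); the first growth must copy into a freshly malloc()ed block, after which the block is owned by the normal allocator and, on the restored (+) side, can simply be realloc()ed. A minimal sketch of that strategy, with illustrative names rather than glibc's GL(dl_ns) machinery:

    #include <stdlib.h>
    #include <string.h>

    /* Illustrative only: not glibc's types.  */
    struct scope
    {
      void **list;    /* current entries */
      size_t nlist;   /* entries in use */
      size_t alloc;   /* 0 until the block is owned by this allocator */
    };

    static int
    scope_reserve (struct scope *s, size_t to_add)
    {
      if (s->alloc == 0)
        {
          /* First growth: the old block came from another allocator,
             so copy into a fresh one instead of realloc()ing it.  */
          size_t n = s->nlist + to_add + 8;
          void **fresh = malloc (n * sizeof (void *));
          if (fresh == NULL)
            return -1;
          memcpy (fresh, s->list, s->nlist * sizeof (void *));
          s->list = fresh;
          s->alloc = n;
        }
      else if (s->nlist + to_add > s->alloc)
        {
          /* Later growths own the block and may realloc it in place.  */
          size_t n = s->alloc + to_add + 8;
          void **fresh = realloc (s->list, n * sizeof (void *));
          if (fresh == NULL)
            return -1;
          s->list = fresh;
          s->alloc = n;
        }
      return 0;
    }
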
@@ -156,49 +145,15 @@ add_to_global (struct link_map *new)
if (map->l_global == 0)
{
map->l_global = 1;
- ns->_ns_main_searchlist->r_list[new_nlist++] = map;
+ GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list[GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_nlist]
+ = map;
+ ++GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_nlist;
}
}
- atomic_write_barrier ();
- ns->_ns_main_searchlist->r_nlist = new_nlist;
return 0;
}
-int
-_dl_scope_free (struct r_scope_elem **old)
-{
- struct dl_scope_free_list *fsl;
-#define DL_SCOPE_FREE_LIST_SIZE (sizeof (fsl->list) / sizeof (fsl->list[0]))
-
- if (RTLD_SINGLE_THREAD_P)
- free (old);
- else if ((fsl = GL(dl_scope_free_list)) == NULL)
- {
- GL(dl_scope_free_list) = fsl = malloc (sizeof (*fsl));
- if (fsl == NULL)
- {
- THREAD_GSCOPE_WAIT ();
- free (old);
- return 1;
- }
- else
- {
- fsl->list[0] = old;
- fsl->count = 1;
- }
- }
- else if (fsl->count < DL_SCOPE_FREE_LIST_SIZE)
- fsl->list[fsl->count++] = old;
- else
- {
- THREAD_GSCOPE_WAIT ();
- while (fsl->count > 0)
- free (fsl->list[--fsl->count]);
- return 1;
- }
- return 0;
-}
static void
dl_open_worker (void *a)
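
Aside: the removed lines implement a discipline for lock-free readers of the global scope. New entries are stored past the published count, a write barrier orders those stores before r_nlist is raised, and a superseded scope array is only freed once THREAD_GSCOPE_WAIT has confirmed that no thread is still inside a symbol lookup; _dl_scope_free additionally parks such arrays on a small free list so the wait is paid once per batch rather than once per array. A rough sketch of the deferred-free part, assuming a wait_for_readers() stand-in for THREAD_GSCOPE_WAIT and a caller-supplied single_threaded flag (none of these names are glibc interfaces):

    #include <stdlib.h>

    #define FREE_LIST_SLOTS 8

    struct deferred_free_list
    {
      void *slots[FREE_LIST_SLOTS];
      size_t count;
    };

    /* OLD must already be unlinked from every scope, so once
       wait_for_readers() has returned no thread can still reference it
       or any previously parked array.  */
    static void
    scope_defer_free (struct deferred_free_list *fl, void *old,
                      int single_threaded, void (*wait_for_readers) (void))
    {
      if (single_threaded)
        free (old);                       /* no concurrent readers exist */
      else if (fl->count < FREE_LIST_SLOTS)
        fl->slots[fl->count++] = old;     /* park it; pay the wait later */
      else
        {
          wait_for_readers ();            /* drain in-flight lookups once */
          while (fl->count > 0)
            free (fl->slots[--fl->count]);
          fl->slots[fl->count++] = old;   /* start the next batch */
        }
    }
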
@@ -206,12 +161,10 @@ dl_open_worker (void *a)
struct dl_open_args *args = a;
const char *file = args->file;
int mode = args->mode;
- struct link_map *new;
+ struct link_map *new, *l;
int lazy;
unsigned int i;
-#ifdef USE_TLS
bool any_tls = false;
-#endif
struct link_map *call_map = NULL;
/* Check whether _dl_open() has been called from a valid DSO. */
@@ -233,14 +186,13 @@ dl_open_worker (void *a)
By default we assume this is the main application. */
call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
- struct link_map *l;
for (Lmid_t ns = 0; ns < DL_NNS; ++ns)
for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
if (caller_dlopen >= (const void *) l->l_map_start
- && caller_dlopen < (const void *) l->l_map_end
- && (l->l_contiguous
- || _dl_addr_inside_object (l, (ElfW(Addr)) caller_dlopen)))
+ && caller_dlopen < (const void *) l->l_map_end)
{
+ /* There must be exactly one DSO for the range of the virtual
+ memory. Otherwise something is really broken. */
assert (ns == l->l_ns);
call_map = l;
goto found_caller;
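
Aside: the removed condition here is stricter than the restored one. An object's [l_map_start, l_map_end) range can contain unmapped holes between its PT_LOAD segments, so for non-contiguous objects the caller's return address is additionally checked against the individual segments (the helper, _dl_addr_inside_object, is the function removed at the end of this diff). A self-contained sketch of that two-step test, using toy types rather than glibc's link_map:

    #include <stddef.h>
    #include <stdint.h>

    struct segment { uintptr_t start, len; };

    struct object
    {
      uintptr_t map_start, map_end;  /* outer range of all load segments */
      int contiguous;                /* nonzero if there are no holes */
      const struct segment *segs;
      size_t nsegs;
    };

    static int
    addr_in_object (const struct object *o, uintptr_t addr)
    {
      if (addr < o->map_start || addr >= o->map_end)
        return 0;                    /* outside the outer range */
      if (o->contiguous)
        return 1;                    /* no holes: the range test is exact */
      for (size_t i = 0; i < o->nsegs; ++i)
        if (addr - o->segs[i].start < o->segs[i].len)
          return 1;                  /* inside one of the load segments */
      return 0;
    }
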
@@ -373,7 +325,7 @@ dl_open_worker (void *a)
/* Relocate the objects loaded. We do this in reverse order so that copy
relocs of earlier objects overwrite the data written by later objects. */
- struct link_map *l = new;
+ l = new;
while (l->l_next)
l = l->l_next;
while (1)
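
Aside: the loop that follows (unchanged, so not shown in full here) first walks to the tail of the doubly linked map list and then relocates objects moving back towards `new` via l_prev, so that, as the comment explains, copy relocations in earlier objects are applied after the later objects have been processed. A tiny sketch of that traversal pattern with a generic node type, not glibc's link_map:

    #include <stddef.h>

    struct node { struct node *next, *prev; };

    /* Visit every node from the tail of the list back to HEAD,
       calling PROCESS on each; HEAD itself is handled last.  */
    static void
    process_in_reverse (struct node *head, void (*process) (struct node *))
    {
      struct node *n = head;
      while (n->next != NULL)
        n = n->next;               /* find the tail first */
      for (;;)
        {
          process (n);
          if (n == head)
            break;                 /* the head is processed last */
          n = n->prev;
        }
    }
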
@@ -465,10 +417,17 @@ dl_open_worker (void *a)
memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
struct r_scope_elem **old = imap->l_scope;
- imap->l_scope = newp;
+ if (RTLD_SINGLE_THREAD_P)
+ imap->l_scope = newp;
+ else
+ {
+ __rtld_mrlock_change (imap->l_scope_lock);
+ imap->l_scope = newp;
+ __rtld_mrlock_done (imap->l_scope_lock);
+ }
if (old != imap->l_scope_mem)
- _dl_scope_free (old);
+ free (old);
imap->l_scope_max = new_size;
}
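
Aside: in the surrounding (unchanged) code the scope array starts out in the fixed-size l_scope_mem buffer embedded in the link map and only moves to a malloc()ed block once it outgrows that buffer, which is why the old array is freed only when it is not l_scope_mem. The (+) lines additionally take the per-map scope lock around the pointer switch when other threads might be traversing it; the (-) side instead relies on a plain store followed by _dl_scope_free. A sketch of the inline-buffer-first growth pattern itself, with illustrative names:

    #include <stdlib.h>
    #include <string.h>

    struct scope_vec
    {
      void **vec;          /* points at inline_buf or at a heap block */
      size_t used;         /* entries currently in use */
      size_t max;          /* current capacity */
      void *inline_buf[4]; /* initial storage, part of the structure */
    };

    static int
    scope_vec_grow (struct scope_vec *s, size_t new_max)
    {
      void **newp = malloc (new_max * sizeof (void *));
      if (newp == NULL)
        return -1;
      memcpy (newp, s->vec, s->used * sizeof (void *));

      void **old = s->vec;
      s->vec = newp;               /* switch to the larger array */
      s->max = new_max;

      if (old != s->inline_buf)    /* never free the embedded buffer */
        free (old);
      return 0;
    }
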
@@ -480,7 +439,6 @@ dl_open_worker (void *a)
atomic_write_barrier ();
imap->l_scope[cnt] = &new->l_searchlist;
}
-#if USE_TLS
/* Only add TLS memory if this object is loaded now and
therefore is not yet initialized. */
else if (! imap->l_init_called
@@ -495,11 +453,11 @@ dl_open_worker (void *a)
if (imap->l_need_tls_init)
{
imap->l_need_tls_init = 0;
-# ifdef SHARED
+#ifdef SHARED
/* Update the slot information data for at least the
generation of the DSO we are allocating data for. */
_dl_update_slotinfo (imap->l_tls_modid);
-# endif
+#endif
GL(dl_init_static_tls) (imap);
assert (imap->l_need_tls_init == 0);
@@ -508,15 +466,12 @@ dl_open_worker (void *a)
/* We have to bump the generation counter. */
any_tls = true;
}
-#endif
}
-#if USE_TLS
/* Bump the generation number if necessary. */
if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
_dl_fatal_printf (N_("\
TLS generation counter wrapped! Please report this."));
-#endif
/* Run the initializer functions of new objects. */
_dl_init (new, args->argc, args->argv, args->env);
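
Aside: the TLS generation counter bumped in this hunk only ever increases, and zero is treated as "no generation yet", so a wrap back to zero cannot be expressed to consumers of the slotinfo list; the code therefore aborts with a fatal message instead of trying to recover. A minimal illustration of the same bump-and-check, using a file-local counter rather than GL(dl_tls_generation):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long tls_generation;  /* 0 means "never bumped" */

    static void
    bump_tls_generation (bool any_tls)
    {
      if (any_tls && __builtin_expect (++tls_generation == 0, 0))
        {
          /* Every consumer assumes the counter is monotonically
             increasing, so a wrap to zero is unrecoverable.  */
          fputs ("TLS generation counter wrapped!\n", stderr);
          abort ();
        }
    }
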
@@ -613,7 +568,6 @@ no more namespaces available for dlmopen()"));
state if relocation failed, for example. */
if (args.map)
{
-#ifdef USE_TLS
/* Maybe some of the modules which were loaded use TLS.
Since it will be removed in the following _dl_close call
we have to mark the dtv array as having gaps to fill the
@@ -623,7 +577,6 @@ no more namespaces available for dlmopen()"));
up. */
if ((mode & __RTLD_AUDIT) == 0)
GL(dl_tls_dtv_gaps) = true;
-#endif
_dl_close_worker (args.map);
}
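
Aside: the comment above gives the reasoning for this flag. The modules loaded before the failure already consumed TLS module IDs, and once _dl_close_worker tears them down those IDs become holes, so the dtv bookkeeping is marked as having gaps that later loads should fill. A toy sketch of that reuse idea (this is not glibc's dtv or slotinfo code):

    #include <stdbool.h>
    #include <stddef.h>

    #define NSLOTS 64

    static bool slot_used[NSLOTS];
    static size_t next_slot = 1;  /* slot 0 is reserved */
    static bool have_gaps;        /* set once an issued slot is released */

    static void
    release_slot (size_t id)
    {
      slot_used[id] = false;
      have_gaps = true;           /* later allocations should scan for holes */
    }

    static size_t
    allocate_slot (void)
    {
      if (have_gaps)
        {
          for (size_t i = 1; i < next_slot; ++i)
            if (!slot_used[i])
              {
                slot_used[i] = true;
                return i;         /* reuse a hole left by a released slot */
              }
          have_gaps = false;      /* no holes left after all */
        }
      if (next_slot < NSLOTS)
        {
          slot_used[next_slot] = true;
          return next_slot++;     /* fall back to a brand-new slot */
        }
      return 0;                   /* exhausted */
    }
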
@@ -697,21 +650,3 @@ show_scope (struct link_map *new)
}
}
#endif
-
-#ifdef IS_IN_rtld
-/* Return non-zero if ADDR lies within one of L's segments. */
-int
-internal_function
-_dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr)
-{
- int n = l->l_phnum;
- const ElfW(Addr) reladdr = addr - l->l_addr;
-
- while (--n >= 0)
- if (l->l_phdr[n].p_type == PT_LOAD
- && reladdr - l->l_phdr[n].p_vaddr >= 0
- && reladdr - l->l_phdr[n].p_vaddr < l->l_phdr[n].p_memsz)
- return 1;
- return 0;
-}
-#endif
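
A note on the removed _dl_addr_inside_object: ElfW(Addr) is an unsigned type, so the `reladdr - p_vaddr >= 0` term is always true, and the containment test is really carried by the second comparison, which is the usual unsigned-wraparound range check. A standalone illustration of that idiom:

    #include <stdint.h>

    /* For unsigned ADDR, (addr - lo) < len is equivalent to
       lo <= addr && addr < lo + len (when lo + len does not overflow):
       if addr < lo the subtraction wraps to a huge value and the
       comparison fails.  */
    static int
    in_range (uintptr_t addr, uintptr_t lo, uintptr_t len)
    {
      return addr - lo < len;
    }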