Diffstat (limited to 'elf')
 elf/dl-close.c           | 113
 elf/dl-fini.c            | 152
 elf/dl-init.c            |  16
 elf/dso-sort-tests-1.def |  19
 elf/tst-audit23.c        |  44
 5 files changed, 173 insertions(+), 171 deletions(-)
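For context on the behavior this patch concerns, here is a minimal
standalone sketch (not part of the patch; the library name "liba.so" is
a placeholder, and older systems need -ldl at link time).  It shows the
ELF constructors and destructors whose run order the _dl_fini and
_dl_close_worker changes below compute:

#include <dlfcn.h>
#include <stdio.h>

__attribute__ ((constructor)) static void
ctor (void)
{
  puts ("constructor runs at load time");
}

__attribute__ ((destructor)) static void
dtor (void)
{
  /* With the scheme restored below, the cross-object destructor order
     is recomputed from the dependency graph by _dl_sort_maps rather
     than taken as the strict reverse of constructor order.  */
  puts ("destructor runs at exit or dlclose");
}

int
main (void)
{
  void *h = dlopen ("liba.so", RTLD_NOW);  /* Placeholder DSO.  */
  if (h != NULL)
    dlclose (h);               /* Triggers liba.so's destructors.  */
  return 0;
}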
diff --git a/elf/dl-close.c b/elf/dl-close.c
index c9a7d06577..1c7a861db1 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -138,31 +138,30 @@ _dl_close_worker (struct link_map *map, bool force)
bool any_tls = false;
const unsigned int nloaded = ns->_ns_nloaded;
+ struct link_map *maps[nloaded];
- /* Run over the list and assign indexes to the link maps. */
+ /* Run over the list and assign indexes to the link maps and enter
+ them into the MAPS array. */
int idx = 0;
for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
{
l->l_map_used = 0;
l->l_map_done = 0;
l->l_idx = idx;
+ maps[idx] = l;
++idx;
}
assert (idx == nloaded);
- /* Keep marking link maps until no new link maps are found. */
- for (struct link_map *l = ns->_ns_loaded; l != NULL; )
+ /* Keep track of the lowest index link map we have covered already. */
+ int done_index = -1;
+ while (++done_index < nloaded)
{
- /* next is reset to earlier link maps for remarking. */
- struct link_map *next = l->l_next;
- int next_idx = l->l_idx + 1; /* next->l_idx, but covers next == NULL. */
+ struct link_map *l = maps[done_index];
if (l->l_map_done)
- {
- /* Already handled. */
- l = next;
- continue;
- }
+ /* Already handled. */
+ continue;
/* Check whether this object is still used. */
if (l->l_type == lt_loaded
@@ -172,10 +171,7 @@ _dl_close_worker (struct link_map *map, bool force)
acquire is sufficient and correct. */
&& atomic_load_acquire (&l->l_tls_dtor_count) == 0
&& !l->l_map_used)
- {
- l = next;
- continue;
- }
+ continue;
/* We need this object and we handle it now. */
l->l_map_used = 1;
@@ -202,11 +198,8 @@ _dl_close_worker (struct link_map *map, bool force)
already processed it, then we need to go back
and process again from that point forward to
ensure we keep all of its dependencies also. */
- if ((*lp)->l_idx < next_idx)
- {
- next = *lp;
- next_idx = next->l_idx;
- }
+ if ((*lp)->l_idx - 1 < done_index)
+ done_index = (*lp)->l_idx - 1;
}
}
@@ -226,65 +219,44 @@ _dl_close_worker (struct link_map *map, bool force)
if (!jmap->l_map_used)
{
jmap->l_map_used = 1;
- if (jmap->l_idx < next_idx)
- {
- next = jmap;
- next_idx = next->l_idx;
- }
+ if (jmap->l_idx - 1 < done_index)
+ done_index = jmap->l_idx - 1;
}
}
}
-
- l = next;
}
- /* Call the destructors in reverse constructor order, and remove the
- closed link maps from the list. */
- for (struct link_map **init_called_head = &_dl_init_called_list;
- *init_called_head != NULL; )
+ /* Sort the entries. We can skip looking for the binary itself which is
+ at the front of the search list for the main namespace. */
+ _dl_sort_maps (maps, nloaded, (nsid == LM_ID_BASE), true);
+
+ /* Call all termination functions at once. */
+ bool unload_any = false;
+ bool scope_mem_left = false;
+ unsigned int unload_global = 0;
+ unsigned int first_loaded = ~0;
+ for (unsigned int i = 0; i < nloaded; ++i)
{
- struct link_map *imap = *init_called_head;
+ struct link_map *imap = maps[i];
- /* _dl_init_called_list is global, to produce a global ordering.
- Ignore the other namespaces (and link maps that are still used). */
- if (imap->l_ns != nsid || imap->l_map_used)
- init_called_head = &imap->l_init_called_next;
- else
+ /* All elements must be in the same namespace. */
+ assert (imap->l_ns == nsid);
+
+ if (!imap->l_map_used)
{
assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
- /* _dl_init_called_list is updated at the same time as
- l_init_called. */
- assert (imap->l_init_called);
-
- if (imap->l_info[DT_FINI_ARRAY] != NULL
- || imap->l_info[DT_FINI] != NULL)
+ /* Call its termination function. Do not do it for
+ half-cooked objects. Temporarily disable exception
+ handling, so that errors are fatal. */
+ if (imap->l_init_called)
_dl_catch_exception (NULL, _dl_call_fini, imap);
#ifdef SHARED
/* Auditing checkpoint: we remove an object. */
_dl_audit_objclose (imap);
#endif
- /* Unlink this link map. */
- *init_called_head = imap->l_init_called_next;
- }
- }
-
-
- bool unload_any = false;
- bool scope_mem_left = false;
- unsigned int unload_global = 0;
-
- /* For skipping un-unloadable link maps in the second loop. */
- struct link_map *first_loaded = ns->_ns_loaded;
- /* Iterate over the namespace to find objects to unload. Some
- unloadable objects may not be on _dl_init_called_list due to
- dlopen failure. */
- for (struct link_map *imap = first_loaded; imap != NULL; imap = imap->l_next)
- {
- if (!imap->l_map_used)
- {
/* This object must not be used anymore. */
imap->l_removed = 1;
@@ -295,8 +267,8 @@ _dl_close_worker (struct link_map *map, bool force)
++unload_global;
/* Remember where the first dynamically loaded object is. */
- if (first_loaded == NULL)
- first_loaded = imap;
+ if (i < first_loaded)
+ first_loaded = i;
}
/* Else imap->l_map_used. */
else if (imap->l_type == lt_loaded)
@@ -432,8 +404,8 @@ _dl_close_worker (struct link_map *map, bool force)
imap->l_loader = NULL;
/* Remember where the first dynamically loaded object is. */
- if (first_loaded == NULL)
- first_loaded = imap;
+ if (i < first_loaded)
+ first_loaded = i;
}
}
@@ -504,11 +476,10 @@ _dl_close_worker (struct link_map *map, bool force)
/* Check each element of the search list to see if all references to
it are gone. */
- for (struct link_map *imap = first_loaded; imap != NULL; )
+ for (unsigned int i = first_loaded; i < nloaded; ++i)
{
- if (imap->l_map_used)
- imap = imap->l_next;
- else
+ struct link_map *imap = maps[i];
+ if (!imap->l_map_used)
{
assert (imap->l_type == lt_loaded);
@@ -719,9 +690,7 @@ _dl_close_worker (struct link_map *map, bool force)
if (imap == GL(dl_initfirst))
GL(dl_initfirst) = NULL;
- struct link_map *next = imap->l_next;
free (imap);
- imap = next;
}
}
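The marking loop restored above is easier to follow outside diff form.
Below is a self-contained sketch of the same pattern with hypothetical
types (struct obj and mark_used are illustrative, not glibc
identifiers): objects sit in an indexed array, and whenever a dependency
at a lower index is newly marked, done_index is moved back so that
dependency's own dependencies are revisited.

#include <stdio.h>

struct obj
{
  int used;                     /* Analogue of l_map_used.  */
  int done;                     /* Analogue of l_map_done.  */
  int ndeps;
  int deps[4];                  /* Dependency indexes, like l_idx.  */
};

static void
mark_used (struct obj *objs, int nobjs, int root)
{
  objs[root].used = 1;
  int done_index = -1;
  while (++done_index < nobjs)
    {
      struct obj *o = &objs[done_index];
      if (o->done || !o->used)
        continue;
      o->done = 1;
      for (int j = 0; j < o->ndeps; ++j)
        {
          int d = o->deps[j];
          if (!objs[d].used)
            {
              objs[d].used = 1;
              /* A dependency earlier in the array was newly marked:
                 back up so its own dependencies get marked too.  */
              if (d - 1 < done_index)
                done_index = d - 1;
            }
        }
    }
}

int
main (void)
{
  /* Object 0 needs 2, object 2 needs 1; object 3 is unreferenced.  */
  struct obj objs[4] = {
    [0] = { .ndeps = 1, .deps = { 2 } },
    [2] = { .ndeps = 1, .deps = { 1 } },
  };
  mark_used (objs, 4, 0);
  for (int i = 0; i < 4; ++i)
    printf ("obj %d used=%d\n", i, objs[i].used);
  return 0;
}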
diff --git a/elf/dl-fini.c b/elf/dl-fini.c
index e201d36651..9acb64f47c 100644
--- a/elf/dl-fini.c
+++ b/elf/dl-fini.c
@@ -24,68 +24,116 @@
void
_dl_fini (void)
{
- /* Call destructors strictly in the reverse order of constructors.
- This causes fewer surprises than some arbitrary reordering based
- on new (relocation) dependencies. None of the objects are
- unmapped, so applications can deal with this if their DSOs remain
- in a consistent state after destructors have run. */
-
- /* Protect against concurrent loads and unloads. */
- __rtld_lock_lock_recursive (GL(dl_load_lock));
-
- /* Ignore objects which are opened during shutdown. */
- struct link_map *local_init_called_list = _dl_init_called_list;
-
- for (struct link_map *l = local_init_called_list; l != NULL;
- l = l->l_init_called_next)
- /* Bump l_direct_opencount of all objects so that they
- are not dlclose()ed from underneath us. */
- ++l->l_direct_opencount;
-
- /* After this point, everything linked from local_init_called_list
- cannot be unloaded because of the reference counter update. */
- __rtld_lock_unlock_recursive (GL(dl_load_lock));
-
- /* Perform two passes: One for non-audit modules, one for audit
- modules. This way, audit modules receive unload notifications
- for non-audit objects, and the destructors for audit modules
- still run. */
+ /* Lots of fun ahead. We have to call the destructors for all still
+ loaded objects, in all namespaces. The problem is that the ELF
+ specification now demands that dependencies between the modules
+ are taken into account. I.e., the destructor for a module is
+ called before the ones for any of its dependencies.
+
+ To make things more complicated, we cannot simply use the reverse
+ order of the constructors. Since the user might have loaded objects
+ using `dlopen' there are possibly several other modules with their
+ dependencies to be taken into account. Therefore we have to start
+ determining the order of the modules once again from the beginning. */
+
+ /* We run the destructors of the main namespace last. As for the
+ other namespaces, we run the destructors in them in reverse
+ order of the namespace ID. */
+#ifdef SHARED
+ int do_audit = 0;
+ again:
+#endif
+ for (Lmid_t ns = GL(dl_nns) - 1; ns >= 0; --ns)
+ {
+ /* Protect against concurrent loads and unloads. */
+ __rtld_lock_lock_recursive (GL(dl_load_lock));
+
+ unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
+ /* No need to do anything for empty namespaces or those used for
+ auditing DSOs. */
+ if (nloaded == 0
+#ifdef SHARED
+ || GL(dl_ns)[ns]._ns_loaded->l_auditing != do_audit
+#endif
+ )
+ __rtld_lock_unlock_recursive (GL(dl_load_lock));
+ else
+ {
#ifdef SHARED
- int last_pass = GLRO(dl_naudit) > 0;
- Lmid_t last_ns = -1;
- for (int do_audit = 0; do_audit <= last_pass; ++do_audit)
+ _dl_audit_activity_nsid (ns, LA_ACT_DELETE);
#endif
- for (struct link_map *l = local_init_called_list; l != NULL;
- l = l->l_init_called_next)
- {
+
+ /* Now we can allocate an array to hold all the pointers and
+ copy the pointers in. */
+ struct link_map *maps[nloaded];
+
+ unsigned int i;
+ struct link_map *l;
+ assert (nloaded != 0 || GL(dl_ns)[ns]._ns_loaded == NULL);
+ for (l = GL(dl_ns)[ns]._ns_loaded, i = 0; l != NULL; l = l->l_next)
+ /* Do not handle ld.so in secondary namespaces. */
+ if (l == l->l_real)
+ {
+ assert (i < nloaded);
+
+ maps[i] = l;
+ l->l_idx = i;
+ ++i;
+
+ /* Bump l_direct_opencount of all objects so that they
+ are not dlclose()ed from underneath us. */
+ ++l->l_direct_opencount;
+ }
+ assert (ns != LM_ID_BASE || i == nloaded);
+ assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1);
+ unsigned int nmaps = i;
+
+ /* Now we have to do the sorting. We can skip looking for the
+ binary itself which is at the front of the search list for
+ the main namespace. */
+ _dl_sort_maps (maps, nmaps, (ns == LM_ID_BASE), true);
+
+ /* We do not rely on the linked list of loaded objects anymore
+ from this point on. We have our own list here (maps). The
+ various members of this list cannot vanish since the open
+ count is too high and will be decremented in this loop. So
+ we release the lock so that some code which might be called
+ from a destructor can directly or indirectly access the
+ lock. */
+ __rtld_lock_unlock_recursive (GL(dl_load_lock));
+
+ /* 'maps' now contains the objects in the right order. Now
+ call the destructors. We have to process this array from
+ the front. */
+ for (i = 0; i < nmaps; ++i)
+ {
+ struct link_map *l = maps[i];
+
+ if (l->l_init_called)
+ {
+ _dl_call_fini (l);
#ifdef SHARED
- if (GL(dl_ns)[l->l_ns]._ns_loaded->l_auditing != do_audit)
- continue;
-
- /* Avoid back-to-back calls of _dl_audit_activity_nsid for the
- same namespace. */
- if (last_ns != l->l_ns)
- {
- if (last_ns >= 0)
- _dl_audit_activity_nsid (last_ns, LA_ACT_CONSISTENT);
- _dl_audit_activity_nsid (l->l_ns, LA_ACT_DELETE);
- last_ns = l->l_ns;
- }
+ /* Auditing checkpoint: another object closed. */
+ _dl_audit_objclose (l);
#endif
+ }
- /* There is no need to re-enable exceptions because _dl_fini
- is not called from a context where exceptions are caught. */
- _dl_call_fini (l);
+ /* Correct the previous increment. */
+ --l->l_direct_opencount;
+ }
#ifdef SHARED
- /* Auditing checkpoint: another object closed. */
- _dl_audit_objclose (l);
+ _dl_audit_activity_nsid (ns, LA_ACT_CONSISTENT);
#endif
- }
+ }
+ }
#ifdef SHARED
- if (last_ns >= 0)
- _dl_audit_activity_nsid (last_ns, LA_ACT_CONSISTENT);
+ if (! do_audit && GLRO(dl_naudit) > 0)
+ {
+ do_audit = 1;
+ goto again;
+ }
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS))
_dl_debug_printf ("\nruntime linker statistics:\n"
diff --git a/elf/dl-init.c b/elf/dl-init.c
index ffd05b7806..ba4d2fdc85 100644
--- a/elf/dl-init.c
+++ b/elf/dl-init.c
@@ -21,7 +21,6 @@
#include <ldsodefs.h>
#include <elf-initfini.h>
-struct link_map *_dl_init_called_list;
static void
call_init (struct link_map *l, int argc, char **argv, char **env)
@@ -43,21 +42,6 @@ call_init (struct link_map *l, int argc, char **argv, char **env)
dependency. */
l->l_init_called = 1;
- /* Help an already-running dlclose: The just-loaded object must not
- be removed during the current pass. (No effect if no dlclose in
- progress.) */
- l->l_map_used = 1;
-
- /* Record execution before starting any initializers. This way, if
- the initializers themselves call dlopen, their ELF destructors
- will eventually be run before this object is destructed, matching
- that their ELF constructors have run before this object was
- constructed. _dl_fini uses this list for audit callbacks, so
- register objects on the list even if they do not have a
- constructor. */
- l->l_init_called_next = _dl_init_called_list;
- _dl_init_called_list = l;
-
/* Check for object which constructors we do not run here. */
if (__builtin_expect (l->l_name[0], 'a') == '\0'
&& l->l_type == lt_executable)
diff --git a/elf/dso-sort-tests-1.def b/elf/dso-sort-tests-1.def
index 61dc54f8ae..4bf9052db1 100644
--- a/elf/dso-sort-tests-1.def
+++ b/elf/dso-sort-tests-1.def
@@ -53,14 +53,21 @@ tst-dso-ordering10: {}->a->b->c;soname({})=c
output: b>a>{}<a<b
# Complex example from Bugzilla #15311, under-linked and with circular
-# relocation(dynamic) dependencies. For both sorting algorithms, the
-# destruction order is the reverse of the construction order, and
-# relocation dependencies are not taken into account.
+# relocation(dynamic) dependencies. While this is technically unspecified, the
+# presumed reasonable practical behavior is for the destructor order to respect
+# the static DT_NEEDED links (here this means the a->b->c->d order).
+# The older dynamic_sort=1 algorithm does not achieve this, while the DFS-based
+# dynamic_sort=2 algorithm does, although it is still arguable whether going
+# beyond spec to do this is the right thing to do.
+# The below expected outputs are what the two algorithms currently produce
+# respectively, for regression testing purposes.
tst-bz15311: {+a;+e;+f;+g;+d;%d;-d;-g;-f;-e;-a};a->b->c->d;d=>[ba];c=>a;b=>e=>a;c=>f=>b;d=>g=>c
-output: {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<g<f<e<a<b<c<d];}
+output(glibc.rtld.dynamic_sort=1): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<a<c<d<g<f<b<e];}
+output(glibc.rtld.dynamic_sort=2): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<g<f<a<b<c<d<e];}
# Test that even in the presence of dependency loops involving dlopen'ed
# object, that object is initialized last (and not unloaded prematurely).
-# Final destructor order is the opposite of constructor order.
+# Final destructor order is indeterminate due to the cycle.
tst-bz28937: {+a;+b;-b;+c;%c};a->a1;a->a2;a2->a;b->b1;c->a1;c=>a1
-output: {+a[a2>a1>a>];+b[b1>b>];-b[<b<b1];+c[c>];%c(a1());}<c<a<a1<a2
+output(glibc.rtld.dynamic_sort=1): {+a[a2>a1>a>];+b[b1>b>];-b[<b<b1];+c[c>];%c(a1());}<a<a2<c<a1
+output(glibc.rtld.dynamic_sort=2): {+a[a2>a1>a>];+b[b1>b>];-b[<b<b1];+c[c>];%c(a1());}<a2<a<c<a1
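The comments above contrast the older dynamic_sort=1 algorithm with the
DFS-based dynamic_sort=2 one.  As a rough illustration of how a
depth-first walk yields a destructor order respecting DT_NEEDED links
such as a->b->c->d, here is a hypothetical sketch (the graph encoding
is invented for illustration; the real implementation lives in
_dl_sort_maps):

#include <stdio.h>

#define NOBJS 4

/* a -> b -> c -> d, as in the tst-bz15311 comment above.  */
static const int deps[NOBJS][NOBJS] = {
  [0] = { [1] = 1 },            /* a needs b.  */
  [1] = { [2] = 1 },            /* b needs c.  */
  [2] = { [3] = 1 },            /* c needs d.  */
};

static int visited[NOBJS];
static int order[NOBJS];
static int norder;

static void
dfs (int v)
{
  visited[v] = 1;
  for (int w = 0; w < NOBJS; ++w)
    if (deps[v][w] && !visited[w])
      dfs (w);
  order[norder++] = v;          /* Post-order: dependencies first.  */
}

int
main (void)
{
  for (int v = 0; v < NOBJS; ++v)
    if (!visited[v])
      dfs (v);
  /* Destructor order is the reverse of the post-order: a, b, c, d.  */
  for (int i = norder - 1; i >= 0; --i)
    printf ("%c\n", "abcd"[order[i]]);
  return 0;
}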
diff --git a/elf/tst-audit23.c b/elf/tst-audit23.c
index 503699c36a..bb7d66c385 100644
--- a/elf/tst-audit23.c
+++ b/elf/tst-audit23.c
@@ -98,8 +98,6 @@ do_test (int argc, char *argv[])
char *lname;
uintptr_t laddr;
Lmid_t lmid;
- uintptr_t cookie;
- uintptr_t namespace;
bool closed;
} objs[max_objs] = { [0 ... max_objs-1] = { .closed = false } };
size_t nobjs = 0;
@@ -119,9 +117,6 @@ do_test (int argc, char *argv[])
size_t buffer_length = 0;
while (xgetline (&buffer, &buffer_length, out))
{
- *strchrnul (buffer, '\n') = '\0';
- printf ("info: subprocess output: %s\n", buffer);
-
if (startswith (buffer, "la_activity: "))
{
uintptr_t cookie;
@@ -130,26 +125,29 @@ do_test (int argc, char *argv[])
&cookie);
TEST_COMPARE (r, 2);
+ /* The cookie identifies the object at the head of the link map,
+ so we only add a new namespace if it changes from the previous
+ one. This works since dlmopen is the last in the test body. */
+ if (cookie != last_act_cookie && last_act_cookie != -1)
+ TEST_COMPARE (last_act, LA_ACT_CONSISTENT);
+
if (this_act == LA_ACT_ADD && acts[nacts] != cookie)
{
- /* The cookie identifies the object at the head of the
- link map, so we only add a new namespace if it
- changes from the previous one. This works since
- dlmopen is the last in the test body. */
- if (cookie != last_act_cookie && last_act_cookie != -1)
- TEST_COMPARE (last_act, LA_ACT_CONSISTENT);
-
acts[nacts++] = cookie;
last_act_cookie = cookie;
}
- /* LA_ACT_DELETE is called multiple times for each
- namespace, depending on destruction order. */
+ /* LA_ACT_DELETE is called in the reverse order of LA_ACT_ADD
+ at program termination (if the test adds a dlclose or a library
+ with extra dependencies this will need to be adapted). */
else if (this_act == LA_ACT_DELETE)
- last_act_cookie = cookie;
+ {
+ last_act_cookie = acts[--nacts];
+ TEST_COMPARE (acts[nacts], cookie);
+ acts[nacts] = 0;
+ }
else if (this_act == LA_ACT_CONSISTENT)
{
TEST_COMPARE (cookie, last_act_cookie);
- last_act_cookie = -1;
/* LA_ACT_DELETE must always be followed by an la_objclose. */
if (last_act == LA_ACT_DELETE)
@@ -181,8 +179,6 @@ do_test (int argc, char *argv[])
objs[nobjs].lname = lname;
objs[nobjs].laddr = laddr;
objs[nobjs].lmid = lmid;
- objs[nobjs].cookie = cookie;
- objs[nobjs].namespace = last_act_cookie;
objs[nobjs].closed = false;
nobjs++;
@@ -205,12 +201,6 @@ do_test (int argc, char *argv[])
if (strcmp (lname, objs[i].lname) == 0 && lmid == objs[i].lmid)
{
TEST_COMPARE (objs[i].closed, false);
- TEST_COMPARE (objs[i].cookie, cookie);
- if (objs[i].namespace == -1)
- /* No LA_ACT_ADD before the first la_objopen call. */
- TEST_COMPARE (acts[0], last_act_cookie);
- else
- TEST_COMPARE (objs[i].namespace, last_act_cookie);
objs[i].closed = true;
break;
}
@@ -219,7 +209,11 @@ do_test (int argc, char *argv[])
/* la_objclose should be called after la_activity(LA_ACT_DELETE) for
the closed object's namespace. */
TEST_COMPARE (last_act, LA_ACT_DELETE);
- seen_first_objclose = true;
+ if (!seen_first_objclose)
+ {
+ TEST_COMPARE (last_act_cookie, cookie);
+ seen_first_objclose = true;
+ }
}
}
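The tst-audit23.c change above turns the LA_ACT_DELETE handling into a
strict stack check: deletes must pop namespace cookies in the reverse
of the order LA_ACT_ADD pushed them.  A condensed sketch of that
discipline (on_add and on_delete are hypothetical stand-ins for the
test's branches, and the cookie values are placeholders):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_NS 16

static uintptr_t acts[MAX_NS];
static size_t nacts;

static void
on_add (uintptr_t cookie)
{
  /* Push, as the LA_ACT_ADD branch does.  */
  assert (nacts < MAX_NS);
  acts[nacts++] = cookie;
}

static void
on_delete (uintptr_t cookie)
{
  /* Pop and verify, as the LA_ACT_DELETE branch does.  */
  assert (nacts > 0);
  assert (acts[--nacts] == cookie);
}

int
main (void)
{
  on_add (0x1000);
  on_add (0x2000);
  on_delete (0x2000);
  on_delete (0x1000);
  puts ("LIFO order verified");
  return 0;
}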