author     Florian Weimer <fweimer@redhat.com>   2023-10-18 11:30:38 +0200
committer  Florian Weimer <fweimer@redhat.com>   2023-10-18 11:30:38 +0200
commit     dd32e1db386c77c61850a7cbd0c126b7b3c63ece
tree       9f0c9c3ead1a6be1c63b6841c85ec5597bf10f6a /elf/dl-close.c
parent     2ad9b674cf6cd6ba59c064427cb7aeb43a66d8a9
Revert "elf: Always call destructors in reverse constructor order (bug 30785)"
This reverts commit 6985865bc3ad5b23147ee73466583dd7fdf65892.
Reason for revert:
The commit changes the order of ELF destructor calls too much relative
to what applications expect or can handle. In particular, during
process exit and _dl_fini, with the reverted commit in place, we no
longer call the destructors of the main program first; that only
happens after some dlopen'ed objects have been destructed. This robs
applications of an opportunity to influence destructor order by
calling dlclose explicitly from the main program's ELF destructors. A
couple of different approaches involving reverse constructor order
were tried, and none of them worked well. It seems we need to keep the
dependency sorting in _dl_fini.
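
As an illustration, here is a minimal sketch of the pattern described
above (the library name "libplugin.so" and the function names are
hypothetical, not from the patch): the main program dlopen's a plugin
and relies on one of its own ELF destructors, which the
dependency-sorted _dl_fini runs before the destructors of dlopen'ed
objects, to dlclose the plugin at a moment the application controls.

#include <dlfcn.h>
#include <stdio.h>

static void *plugin_handle;

int
main (void)
{
  /* Hypothetical plugin library; kept open until process exit.  */
  plugin_handle = dlopen ("libplugin.so", RTLD_NOW);
  if (plugin_handle == NULL)
    fprintf (stderr, "dlopen: %s\n", dlerror ());
  return 0;
}

/* With dependency sorting in _dl_fini, this destructor runs before
   the plugin's own destructors, so the dlclose call below determines
   when those destructors execute.  */
__attribute__ ((destructor))
static void
shutdown_plugins (void)
{
  if (plugin_handle != NULL)
    dlclose (plugin_handle);
}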
There is also an ambiguity regarding nested dlopen calls from ELF
constructors: should those destructors run before or after the object
that called dlopen? Commit 6985865bc3ad5b2314 ordered destructors by
the reverse order in which ELF constructor calls started, but arguably
ordering by the completion of constructors is more correct. However,
that alone is not sufficient to address the application compatibility
issues (it does not change _dl_fini ordering at all).
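
A sketch of the ambiguity, with hypothetical library names: liba's
constructor dlopen's libb, so libb's constructors start and complete
while liba's constructor is still in progress. Destroying in reverse
order of constructor starts tears down libb before liba; destroying in
reverse order of constructor completion tears down liba before libb,
which is arguably more correct, since liba may still use libb from its
own destructors.

/* Built into liba.so (hypothetical); libb.so is another library.  */
#include <dlfcn.h>

__attribute__ ((constructor))
static void
liba_init (void)
{
  /* liba's constructor has started but not yet completed; the nested
     dlopen runs all of libb's constructors before returning.  */
  dlopen ("libb.so", RTLD_NOW | RTLD_GLOBAL);
}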
Diffstat (limited to 'elf/dl-close.c')
-rw-r--r--  elf/dl-close.c | 113
1 file changed, 41 insertions(+), 72 deletions(-)
diff --git a/elf/dl-close.c b/elf/dl-close.c
index c9a7d06577..1c7a861db1 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -138,31 +138,30 @@ _dl_close_worker (struct link_map *map, bool force)
   bool any_tls = false;
   const unsigned int nloaded = ns->_ns_nloaded;
+  struct link_map *maps[nloaded];
 
-  /* Run over the list and assign indexes to the link maps.  */
+  /* Run over the list and assign indexes to the link maps and enter
+     them into the MAPS array.  */
   int idx = 0;
   for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
     {
       l->l_map_used = 0;
       l->l_map_done = 0;
       l->l_idx = idx;
+      maps[idx] = l;
       ++idx;
     }
   assert (idx == nloaded);
 
-  /* Keep marking link maps until no new link maps are found.  */
-  for (struct link_map *l = ns->_ns_loaded; l != NULL; )
+  /* Keep track of the lowest index link map we have covered already.  */
+  int done_index = -1;
+  while (++done_index < nloaded)
     {
-      /* next is reset to earlier link maps for remarking.  */
-      struct link_map *next = l->l_next;
-      int next_idx = l->l_idx + 1; /* next->l_idx, but covers next == NULL.  */
+      struct link_map *l = maps[done_index];
 
       if (l->l_map_done)
-        {
-          /* Already handled.  */
-          l = next;
-          continue;
-        }
+        /* Already handled.  */
+        continue;
 
       /* Check whether this object is still used.  */
       if (l->l_type == lt_loaded
@@ -172,10 +171,7 @@ _dl_close_worker (struct link_map *map, bool force)
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !l->l_map_used)
-        {
-          l = next;
-          continue;
-        }
+        continue;
 
       /* We need this object and we handle it now.  */
      l->l_map_used = 1;
@@ -202,11 +198,8 @@ _dl_close_worker (struct link_map *map, bool force)
                 already processed it, then we need to go back and process
                 again from that point forward to ensure we keep all of its
                 dependencies also.  */
-             if ((*lp)->l_idx < next_idx)
-               {
-                 next = *lp;
-                 next_idx = next->l_idx;
-               }
+             if ((*lp)->l_idx - 1 < done_index)
+               done_index = (*lp)->l_idx - 1;
            }
        }
 
@@ -226,65 +219,44 @@ _dl_close_worker (struct link_map *map, bool force)
              if (!jmap->l_map_used)
                {
                  jmap->l_map_used = 1;
-                 if (jmap->l_idx < next_idx)
-                   {
-                     next = jmap;
-                     next_idx = next->l_idx;
-                   }
+                 if (jmap->l_idx - 1 < done_index)
+                   done_index = jmap->l_idx - 1;
                }
            }
        }
-
-      l = next;
     }
 
-  /* Call the destructors in reverse constructor order, and remove the
-     closed link maps from the list.  */
-  for (struct link_map **init_called_head = &_dl_init_called_list;
-       *init_called_head != NULL; )
+  /* Sort the entries.  We can skip looking for the binary itself which is
+     at the front of the search list for the main namespace.  */
+  _dl_sort_maps (maps, nloaded, (nsid == LM_ID_BASE), true);
+
+  /* Call all termination functions at once.  */
+  bool unload_any = false;
+  bool scope_mem_left = false;
+  unsigned int unload_global = 0;
+  unsigned int first_loaded = ~0;
+  for (unsigned int i = 0; i < nloaded; ++i)
     {
-      struct link_map *imap = *init_called_head;
+      struct link_map *imap = maps[i];
 
-      /* _dl_init_called_list is global, to produce a global odering.
-         Ignore the other namespaces (and link maps that are still used).  */
-      if (imap->l_ns != nsid || imap->l_map_used)
-        init_called_head = &imap->l_init_called_next;
-      else
+      /* All elements must be in the same namespace.  */
+      assert (imap->l_ns == nsid);
+
+      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
 
-         /* _dl_init_called_list is updated at the same time as
-            l_init_called.  */
-         assert (imap->l_init_called);
-
-         if (imap->l_info[DT_FINI_ARRAY] != NULL
-             || imap->l_info[DT_FINI] != NULL)
+         /* Call its termination function.  Do not do it for
+            half-cooked objects.  Temporarily disable exception
+            handling, so that errors are fatal.  */
+         if (imap->l_init_called)
           _dl_catch_exception (NULL, _dl_call_fini, imap);
 
 #ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          _dl_audit_objclose (imap);
 #endif
 
-         /* Unlink this link map.  */
-         *init_called_head = imap->l_init_called_next;
-       }
-    }
-
-  bool unload_any = false;
-  bool scope_mem_left = false;
-  unsigned int unload_global = 0;
-
-  /* For skipping un-unloadable link maps in the second loop.  */
-  struct link_map *first_loaded = ns->_ns_loaded;
-  /* Iterate over the namespace to find objects to unload.  Some
-     unloadable objects may not be on _dl_init_called_list due to
-     dlopen failure.  */
-  for (struct link_map *imap = first_loaded; imap != NULL; imap = imap->l_next)
-    {
-      if (!imap->l_map_used)
-        {
          /* This object must not be used anymore.  */
          imap->l_removed = 1;
@@ -295,8 +267,8 @@ _dl_close_worker (struct link_map *map, bool force)
            ++unload_global;
 
          /* Remember where the first dynamically loaded object is.  */
-         if (first_loaded == NULL)
-           first_loaded = imap;
+         if (i < first_loaded)
+           first_loaded = i;
        }
       /* Else imap->l_map_used.  */
       else if (imap->l_type == lt_loaded)
@@ -432,8 +404,8 @@ _dl_close_worker (struct link_map *map, bool force)
              imap->l_loader = NULL;
 
              /* Remember where the first dynamically loaded object is.  */
-             if (first_loaded == NULL)
-               first_loaded = imap;
+             if (i < first_loaded)
+               first_loaded = i;
            }
        }
 
@@ -504,11 +476,10 @@ _dl_close_worker (struct link_map *map, bool force)
 
   /* Check each element of the search list to see if all references to
      it are gone.  */
-  for (struct link_map *imap = first_loaded; imap != NULL; )
+  for (unsigned int i = first_loaded; i < nloaded; ++i)
     {
-      if (imap->l_map_used)
-        imap = imap->l_next;
-      else
+      struct link_map *imap = maps[i];
+      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded);
 
@@ -719,9 +690,7 @@ _dl_close_worker (struct link_map *map, bool force)
          if (imap == GL(dl_initfirst))
            GL(dl_initfirst) = NULL;
 
-         struct link_map *next = imap->l_next;
          free (imap);
-         imap = next;
        }
     }
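
The first hunks restore the mark-and-rescan pass over the MAPS array.
The following standalone sketch (fake_map and mark_used are simplified
stand-ins for glibc's struct link_map and _dl_close_worker, not the
real code) shows the core idea: walk the maps by index, mark the
dependencies of every still-used object, and whenever a dependency at
a lower index gets marked after its slot has already been passed,
rewind done_index so that slot is revisited.

#include <stdbool.h>
#include <stddef.h>

struct fake_map                /* Stand-in for struct link_map.  */
{
  bool used;                   /* Stand-in for l_map_used.  */
  bool done;                   /* Stand-in for l_map_done.  */
  int idx;                     /* Stand-in for l_idx: index in MAPS.  */
  struct fake_map **deps;      /* NULL-terminated dependency list.  */
};

/* Propagate the USED flag from pre-marked roots to all of their
   dependencies, mirroring the rewind logic in _dl_close_worker.  */
static void
mark_used (struct fake_map **maps, int nloaded)
{
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct fake_map *l = maps[done_index];

      if (l->done || !l->used)
        continue;               /* Already handled, or unreferenced.  */
      l->done = true;

      if (l->deps != NULL)
        for (struct fake_map **dp = l->deps; *dp != NULL; ++dp)
          if (!(*dp)->done)
            {
              (*dp)->used = true;
              /* The dependency may sit at a lower index the loop has
                 already passed; rewind so it is processed too.  */
              if ((*dp)->idx - 1 < done_index)
                done_index = (*dp)->idx - 1;
            }
    }
}

Each map is marked done at most once, so the rewinds are bounded and
the loop terminates; afterwards _dl_sort_maps puts the surviving maps
into dependency order before the termination functions are called.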