author     Chung-Lin Tang <cltang@codesourcery.com>            2021-10-21 21:41:22 +0800
committer  Adhemerval Zanella <adhemerval.zanella@linaro.org>  2021-10-21 11:23:53 -0300
commit     15a0c5730d1d5aeb95f50c9ec7470640084feae8 (patch)
tree       3c38108344adce9db163005cb197b4300a6fff94 /elf/dl-close.c
parent     e6fd79f3795d46dfb583e124be49fc063bc3d58b (diff)
elf: Fix slow DSO sorting behavior in dynamic loader (BZ #17645)
This second patch contains the actual implementation of a new sorting algorithm
for shared objects in the dynamic loader, which fixes the slow behavior that
the current "old" algorithm exhibits when the DSO set contains circular
dependencies.
The new algorithm implemented here is simply a depth-first search (DFS) that
produces a reverse post-order (RPO) sequence, i.e. a topological sort. A new
l_visited:1 bitfield is added to struct link_map to facilitate such a search
more elegantly.
The DFS is applied to the input backwards, from maps[nmap-1] towards maps[0].
This generally results in a more "shallow" recursion depth, since the input is
already in BFS order. Combined with the natural order of processing
l_initfini[] at each node, this also produces an output ordering closer to the
intuitive "left-to-right" order in most cases.
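
A minimal sketch of the idea, under stated assumptions (this is not the glibc
implementation: l_initfini and l_visited are the real struct link_map fields
named above, while dfs_visit, dfs_sort, and the stripped-down struct are
illustrative, and every dependency is assumed to appear in the input array):

#include <string.h>

/* Minimal stand-in for the struct link_map fields used below.  */
struct link_map
{
  struct link_map **l_initfini;  /* NULL-terminated dependency list.  */
  unsigned int l_visited:1;      /* Scratch bit for the DFS.  */
};

/* Emit MAP in post-order, after all of its dependencies.  */
static void
dfs_visit (struct link_map *map, struct link_map ***tail)
{
  if (map->l_visited)
    return;
  map->l_visited = 1;

  if (map->l_initfini != NULL)
    for (int i = 0; map->l_initfini[i] != NULL; i++)
      dfs_visit (map->l_initfini[i], tail);

  /* Filling the output array back-to-front turns the post-order
     emission into a reverse post-order (RPO) result directly.  */
  *tail -= 1;
  **tail = map;
}

/* Sort MAPS[0..NMAPS-1] into dependency order.  Assumes every map
   reachable through l_initfini is itself in MAPS.  */
static void
dfs_sort (struct link_map **maps, unsigned int nmaps)
{
  struct link_map *rpo[nmaps];
  struct link_map **tail = rpo + nmaps;

  for (unsigned int i = 0; i < nmaps; i++)
    maps[i]->l_visited = 0;

  /* Walk the input backwards, from maps[nmaps - 1] towards maps[0];
     for BFS-ordered input this keeps the recursion shallow.  */
  for (unsigned int i = nmaps; i > 0; i--)
    dfs_visit (maps[i - 1], &tail);

  memcpy (maps, rpo, nmaps * sizeof (maps[0]));
}

The key property is that setting l_visited before recursing makes the traversal
terminate even on cyclic dependency graphs, which is exactly the case the old
algorithm handled poorly.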
Another notable implementation adjustment related to this _dl_sort_maps change
is the removal of the two char arrays 'used' and 'done' in _dl_close_worker,
which represented two per-map attributes. These are replaced by two new
bit-fields, l_map_used:1 and l_map_done:1, added to struct link_map. This also
allows discarding the clunky sorting of the 'used' array that _dl_sort_maps
sometimes had to do along the way.
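
In shape, the change looks roughly like the following (illustrative only: the
real one-bit fields sit among struct link_map's many other members, and
mark_used is a hypothetical helper, not a glibc function):

struct link_map
{
  /* ... many other members ...  */
  unsigned int l_map_used:1;  /* Object still referenced; replaces used[].  */
  unsigned int l_map_done:1;  /* Object already processed; replaces done[].  */
};

/* Marking an object no longer needs its index into a parallel array.  */
static void
mark_used (struct link_map *l)
{
  l->l_map_used = 1;
  l->l_map_done = 1;
}

Keeping the flags on the map itself means they follow the map through any
reordering, which is what makes the old 'used' array handling in _dl_sort_maps
unnecessary.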
Tunable support for switching between sorting algorithms at runtime is also
added: a new tunable 'glibc.rtld.dynamic_sort' with currently valid values 1
(old algorithm) and 2 (new DFS algorithm). At the time of this commit, the
default setting is 1 (the old algorithm).
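
As a usage sketch, a sorter can be selected per run through the standard
GLIBC_TUNABLES environment variable (the tunable name and values come from this
patch; 'my_app' is a placeholder for any dynamically linked program):

  GLIBC_TUNABLES=glibc.rtld.dynamic_sort=2 ./my_app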
Signed-off-by: Chung-Lin Tang <cltang@codesourcery.com>
Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Diffstat (limited to 'elf/dl-close.c')
-rw-r--r--  elf/dl-close.c | 34 ++++++++++++++--------------------
1 file changed, 14 insertions(+), 20 deletions(-)
diff --git a/elf/dl-close.c b/elf/dl-close.c
index cfe0f1c0c9..4f5cfcc1c3 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -167,8 +167,6 @@ _dl_close_worker (struct link_map *map, bool force)
   bool any_tls = false;
   const unsigned int nloaded = ns->_ns_nloaded;
-  char used[nloaded];
-  char done[nloaded];
   struct link_map *maps[nloaded];
 
   /* Run over the list and assign indexes to the link maps and enter
      them into the MAPS array.  */
@@ -176,24 +174,21 @@ _dl_close_worker (struct link_map *map, bool force)
   int idx = 0;
   for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
     {
+      l->l_map_used = 0;
+      l->l_map_done = 0;
       l->l_idx = idx;
       maps[idx] = l;
       ++idx;
-
     }
   assert (idx == nloaded);
 
-  /* Prepare the bitmaps.  */
-  memset (used, '\0', sizeof (used));
-  memset (done, '\0', sizeof (done));
-
   /* Keep track of the lowest index link map we have covered already.  */
   int done_index = -1;
   while (++done_index < nloaded)
     {
       struct link_map *l = maps[done_index];
-      if (done[done_index])
+      if (l->l_map_done)
        /* Already handled.  */
        continue;
@@ -204,12 +199,12 @@ _dl_close_worker (struct link_map *map, bool force)
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
-         && !used[done_index])
+         && !l->l_map_used)
        continue;
 
       /* We need this object and we handle it now.  */
-      done[done_index] = 1;
-      used[done_index] = 1;
+      l->l_map_used = 1;
+      l->l_map_done = 1;
       /* Signal the object is still needed.  */
       l->l_idx = IDX_STILL_USED;
@@ -225,9 +220,9 @@ _dl_close_worker (struct link_map *map, bool force)
            {
              assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);
 
-             if (!used[(*lp)->l_idx])
+             if (!(*lp)->l_map_used)
                {
-                 used[(*lp)->l_idx] = 1;
+                 (*lp)->l_map_used = 1;
                  /* If we marked a new object as used, and we've
                     already processed it, then we need to go back
                     and process again from that point forward to
@@ -250,9 +245,9 @@ _dl_close_worker (struct link_map *map, bool force)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);
 
-               if (!used[jmap->l_idx])
+               if (!jmap->l_map_used)
                  {
-                   used[jmap->l_idx] = 1;
+                   jmap->l_map_used = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
@@ -262,8 +257,7 @@ _dl_close_worker (struct link_map *map, bool force)
 
   /* Sort the entries.  We can skip looking for the binary itself which is
      at the front of the search list for the main namespace.  */
-  _dl_sort_maps (maps + (nsid == LM_ID_BASE), nloaded - (nsid == LM_ID_BASE),
-                used + (nsid == LM_ID_BASE), true);
+  _dl_sort_maps (maps, nloaded, (nsid == LM_ID_BASE), true);
 
   /* Call all termination functions at once.  */
 #ifdef SHARED
@@ -280,7 +274,7 @@ _dl_close_worker (struct link_map *map, bool force)
       /* All elements must be in the same namespace.  */
       assert (imap->l_ns == nsid);
 
-      if (!used[i])
+      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
@@ -333,7 +327,7 @@ _dl_close_worker (struct link_map *map, bool force)
          if (i < first_loaded)
            first_loaded = i;
        }
-      /* Else used[i].  */
+      /* Else imap->l_map_used.  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;
@@ -560,7 +554,7 @@ _dl_close_worker (struct link_map *map, bool force)
   for (unsigned int i = first_loaded; i < nloaded; ++i)
     {
      struct link_map *imap = maps[i];
-     if (!used[i])
+     if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded);