author | Florian Weimer <fweimer@redhat.com> | 2019-11-13 15:44:56 +0100 |
---|---|---|
committer | Florian Weimer <fweimer@redhat.com> | 2019-11-27 20:55:35 +0100 |
commit | f63b73814f74032c0e5d0a83300e3d864ef905e5 (patch) | |
tree | dac6303d0f785a7103ede6546011bf430a42e236 /elf/dl-close.c | |
parent | a509eb117fac1d764b15eba64993f4bdb63d7f3c (diff) | |
Remove all loaded objects if dlopen fails, ignoring NODELETE [BZ #20839]
This introduces a “pending NODELETE” state in the link map, which is
flipped to the persistent NODELETE state late in dlopen, via
activate_nodelete. During initial relocation, symbol binding
records pending NODELETE state only. dlclose ignores pending NODELETE
state. Taken together, this ensures that a partially completed dlopen
is rolled back completely because new NODELETE mappings are unloaded.
Tested on x86_64-linux-gnu and i386-linux-gnu.
Change-Id: Ib2a3d86af6f92d75baca65431d74783ee0dbc292
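To make the state transitions concrete, here is a minimal sketch of the mechanism the commit message describes, assuming a three-valued `l_nodelete` field on the link map. Only `l_nodelete` and `link_map_nodelete_active` appear in the diff below; the `inactive`/`pending` enumerator names, the struct, and the helper functions are illustrative, not the actual glibc definitions:

```c
/* Illustrative sketch only: models the pending/active NODELETE states
   described above, not the real glibc data structures.  */
#include <stdbool.h>

enum link_map_nodelete
{
  link_map_nodelete_inactive,   /* Object may be unloaded normally.  */
  link_map_nodelete_pending,    /* Requested during the current dlopen;
                                   not yet committed, so a failed dlopen
                                   can still unload the object.  */
  link_map_nodelete_active      /* Committed; dlclose must never unload.  */
};

struct link_map_sketch
{
  enum link_map_nodelete l_nodelete;
  /* ... other link map members ... */
};

/* During initial relocation/symbol binding, only record the request.  */
static void
mark_nodelete_pending (struct link_map_sketch *l)
{
  if (l->l_nodelete == link_map_nodelete_inactive)
    l->l_nodelete = link_map_nodelete_pending;
}

/* Late in a successful dlopen, commit all pending requests
   (the role played by activate_nodelete).  */
static void
activate_nodelete_sketch (struct link_map_sketch **new_maps, int count)
{
  for (int i = 0; i < count; ++i)
    if (new_maps[i]->l_nodelete == link_map_nodelete_pending)
      new_maps[i]->l_nodelete = link_map_nodelete_active;
}

/* dlclose (and the rollback after a failed dlopen) honors only the
   active state, so pending-NODELETE objects are still unloaded.  */
static bool
may_unload (const struct link_map_sketch *l)
{
  return l->l_nodelete != link_map_nodelete_active;
}
```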
Diffstat (limited to 'elf/dl-close.c')
-rw-r--r-- | elf/dl-close.c | 14 |
1 file changed, 3 insertions, 11 deletions
```diff
diff --git a/elf/dl-close.c b/elf/dl-close.c
index 33486b9cbc..e35a62daf6 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -168,14 +168,6 @@ _dl_close_worker (struct link_map *map, bool force)
   char done[nloaded];
   struct link_map *maps[nloaded];
 
-  /* Clear DF_1_NODELETE to force object deletion.  We don't need to touch
-     l_tls_dtor_count because forced object deletion only happens when an
-     error occurs during object load.  Destructor registration for TLS
-     non-POD objects should not have happened till then for this
-     object.  */
-  if (force)
-    map->l_flags_1 &= ~DF_1_NODELETE;
-
   /* Run over the list and assign indexes to the link maps and enter
      them into the MAPS array.  */
   int idx = 0;
@@ -205,7 +197,7 @@ _dl_close_worker (struct link_map *map, bool force)
       /* Check whether this object is still used.  */
       if (l->l_type == lt_loaded
           && l->l_direct_opencount == 0
-          && (l->l_flags_1 & DF_1_NODELETE) == 0
+          && l->l_nodelete != link_map_nodelete_active
           /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
              acquire is sufficient and correct.  */
           && atomic_load_acquire (&l->l_tls_dtor_count) == 0
@@ -288,7 +280,7 @@ _dl_close_worker (struct link_map *map, bool force)
       if (!used[i])
         {
           assert (imap->l_type == lt_loaded
-                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);
+                  && imap->l_nodelete != link_map_nodelete_active);
 
           /* Call its termination function.  Do not do it for
              half-cooked objects.  Temporarily disable exception
@@ -838,7 +830,7 @@ _dl_close (void *_map)
      before we took the lock.  There is no way to detect this (see below)
      so we proceed assuming this isn't the case.  First see whether we
      can remove the object at all.  */
-  if (__glibc_unlikely (map->l_flags_1 & DF_1_NODELETE))
+  if (__glibc_unlikely (map->l_nodelete == link_map_nodelete_active))
     {
       /* Nope.  Do nothing.  */
       __rtld_lock_unlock_recursive (GL(dl_load_lock));
```
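The user-visible effect of the rollback can be checked with a small test program along these lines. `libpending-nodelete.so` is a hypothetical shared object that would be marked NODELETE during relocation but whose dlopen then fails, so both the name and the failure mode are assumptions for illustration:

```c
/* Sketch of a rollback check: after a failed dlopen, RTLD_NOLOAD should
   not find a leftover mapping of the object.  */
#define _GNU_SOURCE   /* For RTLD_NOLOAD.  */
#include <dlfcn.h>
#include <stdio.h>

int
main (void)
{
  /* Expected to fail partway through, after NODELETE was requested.  */
  void *handle = dlopen ("libpending-nodelete.so", RTLD_NOW);
  if (handle == NULL)
    printf ("dlopen failed as expected: %s\n", dlerror ());

  /* With the fix, the failed dlopen leaves nothing behind, even though
     the object had requested NODELETE.  */
  void *leftover = dlopen ("libpending-nodelete.so", RTLD_NOW | RTLD_NOLOAD);
  if (leftover == NULL)
    puts ("no leftover mapping after failed dlopen");
  else
    dlclose (leftover);

  return 0;
}
```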