about summary refs log tree commit diff
diff options
context:
space:
mode:
author    Florian Weimer <fweimer@redhat.com>  2016-04-13 14:10:49 -0500
committer Paul E. Murphy <murphyp@linux.vnet.ibm.com>  2016-04-13 14:10:49 -0500
commit    eb8c932bac2e6c1273107b8a2721f382575b002a (patch)
tree      e0d41098b7c5e9b3fba8a8374aa563b9fe8c8891
parent    00cd4dad175f648783f808aef681d16c10fc34fa (diff)
download  glibc-eb8c932bac2e6c1273107b8a2721f382575b002a.tar
          glibc-eb8c932bac2e6c1273107b8a2721f382575b002a.tar.gz
          glibc-eb8c932bac2e6c1273107b8a2721f382575b002a.tar.bz2
          glibc-eb8c932bac2e6c1273107b8a2721f382575b002a.zip
malloc: Fix list_lock/arena lock deadlock [BZ #19182]

	* malloc/arena.c (list_lock): Document lock ordering requirements.
	(free_list_lock): New lock.
	(ptmalloc_lock_all): Comment on free_list_lock.
	(ptmalloc_unlock_all2): Reinitialize free_list_lock.
	(detach_arena): Update comment.  free_list_lock is now needed.
	(_int_new_arena): Use free_list_lock around detach_arena call.
	Acquire arena lock after list_lock.  Add comment, including FIXME
	about incorrect synchronization.
	(get_free_list): Switch to free_list_lock.
	(reused_arena): Acquire free_list_lock around detach_arena call
	and attached threads counter update.  Add two FIXMEs about
	incorrect synchronization.
	(arena_thread_freeres): Switch to free_list_lock.
	* malloc/malloc.c (struct malloc_state): Update comments to
	mention free_list_lock.

(cherry picked from commit 90c400bd4904b0240a148f0b357a5cbc36179239)
-rw-r--r--  ChangeLog        19
-rw-r--r--  NEWS              2
-rw-r--r--  malloc/arena.c   63
-rw-r--r--  malloc/malloc.c   6
4 files changed, 75 insertions(+), 15 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index fde59f1fb0..e0cb376d68 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,24 @@
2016-04-13 Florian Weimer <fweimer@redhat.com>
+ [BZ #19182]
+ * malloc/arena.c (list_lock): Document lock ordering requirements.
+ (free_list_lock): New lock.
+ (ptmalloc_lock_all): Comment on free_list_lock.
+ (ptmalloc_unlock_all2): Reinitialize free_list_lock.
+ (detach_arena): Update comment. free_list_lock is now needed.
+ (_int_new_arena): Use free_list_lock around detach_arena call.
+ Acquire arena lock after list_lock. Add comment, including FIXME
+ about incorrect synchronization.
+ (get_free_list): Switch to free_list_lock.
+ (reused_arena): Acquire free_list_lock around detach_arena call
+ and attached threads counter update. Add two FIXMEs about
+ incorrect synchronization.
+ (arena_thread_freeres): Switch to free_list_lock.
+ * malloc/malloc.c (struct malloc_state): Update comments to
+ mention free_list_lock.
+
+2016-04-13 Florian Weimer <fweimer@redhat.com>
+
[BZ #19243]
* malloc/arena.c (get_free_list): Remove assert and adjust
reference count handling. Add comment about reused_arena
diff --git a/NEWS b/NEWS
index 21c85ca6a0..7b13178aa8 100644
--- a/NEWS
+++ b/NEWS
@@ -25,7 +25,7 @@ Version 2.22.1
17905, 18420, 18421, 18480, 18589, 18743, 18778, 18781, 18787, 18796,
18870, 18887, 18921, 18928, 18969, 18985, 19003, 19018, 19048, 19058,
- 19174, 19178, 19243, 19590, 19682, 19791, 19822, 19853, 19879.
+ 19174, 19178, 19182, 19243, 19590, 19682, 19791, 19822, 19853, 19879.
* The getnetbyname implementation in nss_dns had a potentially unbounded
alloca call (in the form of a call to strdupa), leading to a stack
diff --git a/malloc/arena.c b/malloc/arena.c
index f05c5023d9..463d31d88f 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -67,10 +67,29 @@ extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
/* Thread specific data */
static tsd_key_t arena_key;
-static mutex_t list_lock = MUTEX_INITIALIZER;
+
+/* Arena free list. free_list_lock synchronizes access to the
+ free_list variable below, and the next_free and attached_threads
+ members of struct malloc_state objects. No other locks must be
+ acquired after free_list_lock has been acquired. */
+
+static mutex_t free_list_lock = MUTEX_INITIALIZER;
static size_t narenas = 1;
static mstate free_list;
+/* list_lock prevents concurrent writes to the next member of struct
+ malloc_state objects.
+
+ Read access to the next member is supposed to synchronize with the
+ atomic_write_barrier and the write to the next member in
+ _int_new_arena. This suffers from data races; see the FIXME
+ comments in _int_new_arena and reused_arena.
+
+ list_lock also prevents concurrent forks. When list_lock is
+ acquired, no arena lock must be acquired, but it is permitted to
+ acquire arena locks after list_lock. */
+static mutex_t list_lock = MUTEX_INITIALIZER;
+
/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;
@@ -210,6 +229,9 @@ ptmalloc_lock_all (void)
if (__malloc_initialized < 1)
return;
+ /* We do not acquire free_list_lock here because we completely
+ reconstruct free_list in ptmalloc_unlock_all2. */
+
if (mutex_trylock (&list_lock))
{
void *my_arena;
@@ -291,6 +313,7 @@ ptmalloc_unlock_all2 (void)
/* Push all arenas to the free list, except save_arena, which is
attached to the current thread. */
+ mutex_init (&free_list_lock);
if (save_arena != NULL)
((mstate) save_arena)->attached_threads = 1;
free_list = NULL;
@@ -308,6 +331,7 @@ ptmalloc_unlock_all2 (void)
if (ar_ptr == &main_arena)
break;
}
+
mutex_init (&list_lock);
atfork_recursive_cntr = 0;
}
@@ -735,7 +759,7 @@ heap_trim (heap_info *heap, size_t pad)
/* Create a new arena with initial size "size". */
/* If REPLACED_ARENA is not NULL, detach it from this thread. Must be
- called while list_lock is held. */
+ called while free_list_lock is held. */
static void
detach_arena (mstate replaced_arena)
{
@@ -789,19 +813,34 @@ _int_new_arena (size_t size)
tsd_getspecific (arena_key, replaced_arena);
tsd_setspecific (arena_key, (void *) a);
mutex_init (&a->mutex);
- (void) mutex_lock (&a->mutex);
(void) mutex_lock (&list_lock);
- detach_arena (replaced_arena);
-
/* Add the new arena to the global list. */
a->next = main_arena.next;
+ /* FIXME: The barrier is an attempt to synchronize with read access
+ in reused_arena, which does not acquire list_lock while
+ traversing the list. */
atomic_write_barrier ();
main_arena.next = a;
(void) mutex_unlock (&list_lock);
+ (void) mutex_lock (&free_list_lock);
+ detach_arena (replaced_arena);
+ (void) mutex_unlock (&free_list_lock);
+
+ /* Lock this arena. NB: Another thread may have been attached to
+ this arena because the arena is now accessible from the
+ main_arena.next list and could have been picked by reused_arena.
+ This can only happen for the last arena created (before the arena
+ limit is reached). At this point, some arena has to be attached
+ to two threads. We could acquire the arena lock before list_lock
+ to make it less likely that reused_arena picks this new arena,
+ but this could result in a deadlock with ptmalloc_lock_all. */
+
+ (void) mutex_lock (&a->mutex);
+
return a;
}
@@ -818,7 +857,7 @@ get_free_list (void)
if (result != NULL)
{
- (void) mutex_lock (&list_lock);
+ (void) mutex_lock (&free_list_lock);
result = free_list;
if (result != NULL)
{
@@ -829,7 +868,7 @@ get_free_list (void)
detach_arena (replaced_arena);
}
- (void) mutex_unlock (&list_lock);
+ (void) mutex_unlock (&free_list_lock);
if (result != NULL)
{
@@ -849,6 +888,7 @@ static mstate
reused_arena (mstate avoid_arena)
{
mstate result;
+ /* FIXME: Access to next_to_use suffers from data races. */
static mstate next_to_use;
if (next_to_use == NULL)
next_to_use = &main_arena;
@@ -861,6 +901,7 @@ reused_arena (mstate avoid_arena)
if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
goto out;
+ /* FIXME: This is a data race, see _int_new_arena. */
result = result->next;
}
while (result != next_to_use);
@@ -895,10 +936,10 @@ out:
mstate replaced_arena;
tsd_getspecific (arena, replaced_arena);
- (void) mutex_lock (&list_lock);
+ (void) mutex_lock (&free_list_lock);
detach_arena (replaced_arena);
++result->attached_threads;
- (void) mutex_unlock (&list_lock);
+ (void) mutex_unlock (&free_list_lock);
}
LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
@@ -993,7 +1034,7 @@ arena_thread_freeres (void)
if (a != NULL)
{
- (void) mutex_lock (&list_lock);
+ (void) mutex_lock (&free_list_lock);
/* If this was the last attached thread for this arena, put the
arena on the free list. */
assert (a->attached_threads > 0);
@@ -1002,7 +1043,7 @@ arena_thread_freeres (void)
a->next_free = free_list;
free_list = a;
}
- (void) mutex_unlock (&list_lock);
+ (void) mutex_unlock (&free_list_lock);
}
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 037d4ff19b..5c84e628c6 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1710,12 +1710,12 @@ struct malloc_state
struct malloc_state *next;
/* Linked list for free arenas. Access to this field is serialized
- by list_lock in arena.c. */
+ by free_list_lock in arena.c. */
struct malloc_state *next_free;
/* Number of threads attached to this arena. 0 if the arena is on
- the free list. Access to this field is serialized by list_lock
- in arena.c. */
+ the free list. Access to this field is serialized by
+ free_list_lock in arena.c. */
INTERNAL_SIZE_T attached_threads;
/* Memory allocated from the system in this arena. */