author    Florian Weimer <fweimer@redhat.com>    2015-12-16 12:39:48 +0100
committer Florian Weimer <fweimer@redhat.com>    2015-12-16 12:39:48 +0100
commit    3da825ce483903e3a881a016113b3e59fd4041de (patch)
tree      092a1973785289ec3bd6bb2eb9c40064608d33b6 /malloc
parent    f1aceee39289f97a420126c58007eba77fb2dd30 (diff)
malloc: Fix attached thread reference count handling [BZ #19243]
reused_arena can increase the attached thread count of arenas on the
free list.  This means that the assertion that the reference count is
zero is incorrect.  In this case, the reference count initialization
is incorrect as well and could cause arenas to be put on the free
list too early (while they still have attached threads).

	* malloc/arena.c (get_free_list): Remove assert and adjust
	reference count handling.  Add comment about reused_arena
	interaction.
	(reused_arena): Add comments about get_free_list interaction.
	* malloc/tst-malloc-thread-exit.c: New file.
	* malloc/Makefile (tests): Add tst-malloc-thread-exit.
	(tst-malloc-thread-exit): Link against libpthread.
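The interleaving behind the removed assertion can be illustrated with a small
standalone model.  All of the model_* names below are invented for this sketch
and only mimic the attached_threads accounting described above; this is not
glibc code and it omits all locking.

/* Standalone model of the attached_threads accounting described in the
   commit message.  The model_* names are invented for this sketch and
   do not exist in glibc; only the counting idea is taken from above.  */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct model_arena
{
  size_t attached_threads;        /* Threads currently using the arena.  */
  struct model_arena *next_free;  /* Link on the simulated free list.  */
};

static struct model_arena *model_free_list;

/* Thread exit: drop the reference and put the arena on the free list.  */
static void
model_detach (struct model_arena *a)
{
  --a->attached_threads;
  a->next_free = model_free_list;
  model_free_list = a;
}

/* reused_arena scans all arenas, so it can attach a thread to an arena
   that is still linked on the free list.  */
static void
model_reused_arena (struct model_arena *a)
{
  ++a->attached_threads;
}

/* get_free_list after the fix: increment the count instead of asserting
   that it is zero and overwriting it with 1.  */
static struct model_arena *
model_get_free_list (void)
{
  struct model_arena *a = model_free_list;
  if (a != NULL)
    {
      model_free_list = a->next_free;
      ++a->attached_threads;
    }
  return a;
}

int
main (void)
{
  struct model_arena arena = { .attached_threads = 1 };

  model_detach (&arena);        /* Exiting thread: count drops to 0.  */
  model_reused_arena (&arena);  /* Second thread attaches via reused_arena: count 1.  */

  /* Third thread takes the arena off the free list.  The old
     assert (attached_threads == 0) would have fired here, and the old
     assignment to 1 would later have free-listed the arena while the
     second thread was still using it.  */
  struct model_arena *picked = model_get_free_list ();
  printf ("attached_threads = %zu\n", picked->attached_threads);
  assert (picked->attached_threads == 2);
  return 0;
}

The test added below stresses exactly this interleaving by making many
threads attach arenas and exit concurrently.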
Diffstat (limited to 'malloc')
-rw-r--r--  malloc/Makefile                    4
-rw-r--r--  malloc/arena.c                    12
-rw-r--r--  malloc/tst-malloc-thread-exit.c  217
3 files changed, 228 insertions, 5 deletions
diff --git a/malloc/Makefile b/malloc/Makefile
index 67ed293148..aa0579cb98 100644
--- a/malloc/Makefile
+++ b/malloc/Makefile
@@ -28,7 +28,7 @@ tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \
tst-mallocstate tst-mcheck tst-mallocfork tst-trim1 \
tst-malloc-usable tst-realloc tst-posix_memalign \
tst-pvalloc tst-memalign tst-mallopt tst-scratch_buffer \
- tst-malloc-backtrace
+ tst-malloc-backtrace tst-malloc-thread-exit
test-srcs = tst-mtrace
routines = malloc morecore mcheck mtrace obstack \
@@ -47,6 +47,8 @@ libmemusage-inhibit-o = $(filter-out .os,$(object-suffixes))
$(objpfx)tst-malloc-backtrace: $(common-objpfx)nptl/libpthread.so \
$(common-objpfx)nptl/libpthread_nonshared.a
+$(objpfx)tst-malloc-thread-exit: $(common-objpfx)nptl/libpthread.so \
+ $(common-objpfx)nptl/libpthread_nonshared.a
# These should be removed by `make clean'.
extra-objs = mcheck-init.o libmcheck.a
diff --git a/malloc/arena.c b/malloc/arena.c
index 3dab7bb297..73bda84b57 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -805,6 +805,8 @@ _int_new_arena (size_t size)
}
+/* Remove an arena from free_list. The arena may be in use because it
+ was attached concurrently to a thread by reused_arena below. */
static mstate
get_free_list (void)
{
@@ -818,10 +820,8 @@ get_free_list (void)
{
free_list = result->next_free;
- /* Arenas on the free list are not attached to any thread. */
- assert (result->attached_threads == 0);
- /* But the arena will now be attached to this thread. */
- result->attached_threads = 1;
+ /* The arena will be attached to this thread. */
+ ++result->attached_threads;
detach_arena (replaced_arena);
}
@@ -849,6 +849,8 @@ reused_arena (mstate avoid_arena)
if (next_to_use == NULL)
next_to_use = &main_arena;
+ /* Iterate over all arenas (including those linked from
+ free_list). */
result = next_to_use;
do
{
@@ -883,6 +885,8 @@ reused_arena (mstate avoid_arena)
(void) mutex_lock (&result->mutex);
out:
+ /* Attach the arena to the current thread. Note that we may have
+ selected an arena which was on free_list. */
{
mstate replaced_arena = thread_arena;
(void) mutex_lock (&list_lock);
diff --git a/malloc/tst-malloc-thread-exit.c b/malloc/tst-malloc-thread-exit.c
new file mode 100644
index 0000000000..da7297e49c
--- /dev/null
+++ b/malloc/tst-malloc-thread-exit.c
@@ -0,0 +1,217 @@
+/* Test malloc with concurrent thread termination.
+ Copyright (C) 2015 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* This test spawns a number of outer threads, equal to the arena
+ limit. The outer threads run a loop which starts and joins two
+ different kinds of threads: the first kind allocates (attaching an
+ arena to the thread; malloc_first_thread) and waits, the second
+ kind waits and allocates (wait_first_thread). Both kinds of
+ threads exit immediately after waiting. The hope is that this will
+ exhibit races in thread termination and arena management,
+ particularly related to the arena free list. */
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#define TIMEOUT 7
+
+static bool termination_requested;
+static int inner_thread_count = 4;
+static size_t malloc_size = 32;
+
+static void
+__attribute__ ((noinline, noclone))
+unoptimized_free (void *ptr)
+{
+ free (ptr);
+}
+
+static void *
+malloc_first_thread (void * closure)
+{
+ pthread_barrier_t *barrier = closure;
+ void *ptr = malloc (malloc_size);
+ if (ptr == NULL)
+ {
+ printf ("error: malloc: %m\n");
+ abort ();
+ }
+ int ret = pthread_barrier_wait (barrier);
+ if (ret != 0 && ret != PTHREAD_BARRIER_SERIAL_THREAD)
+ {
+ errno = ret;
+ printf ("error: pthread_barrier_wait: %m\n");
+ abort ();
+ }
+ unoptimized_free (ptr);
+ return NULL;
+}
+
+static void *
+wait_first_thread (void * closure)
+{
+ pthread_barrier_t *barrier = closure;
+ int ret = pthread_barrier_wait (barrier);
+ if (ret != 0 && ret != PTHREAD_BARRIER_SERIAL_THREAD)
+ {
+ errno = ret;
+ printf ("error: pthread_barrier_wait: %m\n");
+ abort ();
+ }
+ void *ptr = malloc (malloc_size);
+ if (ptr == NULL)
+ {
+ printf ("error: malloc: %m\n");
+ abort ();
+ }
+ unoptimized_free (ptr);
+ return NULL;
+}
+
+static void *
+outer_thread (void *closure)
+{
+ pthread_t *threads = calloc (sizeof (*threads), inner_thread_count);
+ if (threads == NULL)
+ {
+ printf ("error: calloc: %m\n");
+ abort ();
+ }
+
+ while (!__atomic_load_n (&termination_requested, __ATOMIC_RELAXED))
+ {
+ pthread_barrier_t barrier;
+ int ret = pthread_barrier_init (&barrier, NULL, inner_thread_count + 1);
+ if (ret != 0)
+ {
+ errno = ret;
+ printf ("pthread_barrier_init: %m\n");
+ abort ();
+ }
+ for (int i = 0; i < inner_thread_count; ++i)
+ {
+ void *(*func) (void *);
+ if ((i % 2) == 0)
+ func = malloc_first_thread;
+ else
+ func = wait_first_thread;
+ ret = pthread_create (threads + i, NULL, func, &barrier);
+ if (ret != 0)
+ {
+ errno = ret;
+ printf ("error: pthread_create: %m\n");
+ abort ();
+ }
+ }
+ ret = pthread_barrier_wait (&barrier);
+ if (ret != 0 && ret != PTHREAD_BARRIER_SERIAL_THREAD)
+ {
+ errno = ret;
+ printf ("pthread_wait: %m\n");
+ abort ();
+ }
+ for (int i = 0; i < inner_thread_count; ++i)
+ {
+ ret = pthread_join (threads[i], NULL);
+ if (ret != 0)
+ {
+ errno = ret;
+ printf ("error: pthread_join: %m\n");
+ abort ();
+ }
+ }
+ ret = pthread_barrier_destroy (&barrier);
+ if (ret != 0)
+ {
+ errno = ret;
+ printf ("error: pthread_barrier_destroy: %m\n");
+ abort ();
+ }
+ }
+
+ free (threads);
+
+ return NULL;
+}
+
+static int
+do_test (void)
+{
+ /* The number of top-level threads should be equal to the number of
+ arenas. See arena_get2. */
+ long outer_thread_count = sysconf (_SC_NPROCESSORS_ONLN);
+ if (outer_thread_count >= 1)
+ {
+ /* See NARENAS_FROM_NCORES in malloc.c. */
+ if (sizeof (long) == 4)
+ outer_thread_count *= 2;
+ else
+ outer_thread_count *= 8;
+ }
+
+ /* Leave some room for shutting down all threads gracefully. */
+ int timeout = TIMEOUT - 2;
+
+ pthread_t *threads = calloc (sizeof (*threads), outer_thread_count);
+ if (threads == NULL)
+ {
+ printf ("error: calloc: %m\n");
+ abort ();
+ }
+
+ for (long i = 0; i < outer_thread_count; ++i)
+ {
+ int ret = pthread_create (threads + i, NULL, outer_thread, NULL);
+ if (ret != 0)
+ {
+ errno = ret;
+ printf ("error: pthread_create: %m\n");
+ abort ();
+ }
+ }
+
+ struct timespec ts = {timeout, 0};
+ if (nanosleep (&ts, NULL))
+ {
+ printf ("error: error: nanosleep: %m\n");
+ abort ();
+ }
+
+ __atomic_store_n (&termination_requested, true, __ATOMIC_RELAXED);
+
+ for (long i = 0; i < outer_thread_count; ++i)
+ {
+ int ret = pthread_join (threads[i], NULL);
+ if (ret != 0)
+ {
+ errno = ret;
+ printf ("error: pthread_join: %m\n");
+ abort ();
+ }
+ }
+ free (threads);
+
+ return 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"