author	Wilco Dijkstra <wdijkstr@arm.com>	2017-10-20 17:27:53 +0100
committer	Wilco Dijkstra <wdijkstr@arm.com>	2017-10-20 17:31:06 +0100
commit	a15d53e2de4c7d83bda251469d92a3c7b49a90db (patch)
tree	6ed955b98d9166a0c9d85c30d1898a4d9238a3e9 /malloc/malloc.c
parent	b9a558e790c7f13f04f3fcf229f12543b8119732 (diff)
Add single-threaded path to _int_free
This patch adds single-threaded fast paths to _int_free.
Bypass the explicit locking for larger allocations.

	* malloc/malloc.c (_int_free): Add SINGLE_THREAD_P fast paths.
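In essence, the patch replaces an unconditional atomic compare-and-exchange loop with a plain store whenever the process is known to be single-threaded. A minimal standalone sketch of that pattern using C11 atomics, where single_threaded and fastbin_push are illustrative stand-ins for glibc's SINGLE_THREAD_P and the fastbin code below, not glibc's actual internals:

/* Illustrative sketch, not glibc code: chunks are reduced to a bare
   forward pointer and abort() stands in for malloc_printerr.  */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct chunk { struct chunk *fd; };

/* Assumed to be maintained elsewhere, e.g. cleared when a second
   thread is created.  */
static bool single_threaded = true;

static void
fastbin_push (struct chunk *_Atomic *fb, struct chunk *p)
{
  struct chunk *old = atomic_load_explicit (fb, memory_order_relaxed);

  if (single_threaded)
    {
      /* No other thread can race with us: plain loads and stores.  */
      if (old == p)
        abort ();                     /* double free */
      p->fd = old;
      atomic_store_explicit (fb, p, memory_order_relaxed);
    }
  else
    {
      /* Other threads may push concurrently: loop on a CAS, re-running
         the double-free check against each newly observed top.  */
      do
        {
          if (old == p)
            abort ();                 /* double free */
          p->fd = old;
        }
      while (!atomic_compare_exchange_weak_explicit
               (fb, &old, p, memory_order_release, memory_order_relaxed));
    }
}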
Diffstat (limited to 'malloc/malloc.c')
-rw-r--r--	malloc/malloc.c	43
1 file changed, 29 insertions(+), 14 deletions(-)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 3d7c23917c..4db21cc8c1 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4159,24 +4159,34 @@ _int_free (mstate av, mchunkptr p, int have_lock)
     /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
     mchunkptr old = *fb, old2;
-    unsigned int old_idx = ~0u;
-    do
+
+    if (SINGLE_THREAD_P)
       {
-        /* Check that the top of the bin is not the record we are going to add
-           (i.e., double free).  */
+        /* Check that the top of the bin is not the record we are going to
+           add (i.e., double free).  */
         if (__builtin_expect (old == p, 0))
           malloc_printerr ("double free or corruption (fasttop)");
-        /* Check that size of fastbin chunk at the top is the same as
-           size of the chunk that we are adding.  We can dereference OLD
-           only if we have the lock, otherwise it might have already been
-           deallocated.  See use of OLD_IDX below for the actual check.  */
-        if (have_lock && old != NULL)
-          old_idx = fastbin_index(chunksize(old));
-        p->fd = old2 = old;
+        p->fd = old;
+        *fb = p;
       }
-    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
-
-    if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
+    else
+      do
+        {
+          /* Check that the top of the bin is not the record we are going to
+             add (i.e., double free).  */
+          if (__builtin_expect (old == p, 0))
+            malloc_printerr ("double free or corruption (fasttop)");
+          p->fd = old2 = old;
+        }
+      while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
+             != old2);
+
+    /* Check that size of fastbin chunk at the top is the same as
+       size of the chunk that we are adding.  We can dereference OLD
+       only if we have the lock, otherwise it might have already been
+       allocated again.  */
+    if (have_lock && old != NULL
+        && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
       malloc_printerr ("invalid fastbin entry (free)");
   }
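A note on the hunk above: the size sanity check moves below the push rather than running inside the CAS loop, because OLD may only be dereferenced while the arena lock is held; without it, the old top chunk could already have been handed out again. A simplified sketch of that check, with stand-in definitions for glibc's chunk macros (real fastbin indexing depends on SIZE_SZ and alignment):

/* Simplified stand-ins for glibc's macros, for illustration only.  */
#include <stddef.h>

#define SIZE_BITS ((size_t) 7)                 /* low bits carry flags */
#define chunksize(p)      ((p)->size & ~SIZE_BITS)
#define fastbin_index(sz) ((unsigned int) ((sz) >> 4) - 2)

struct chunk { size_t size; struct chunk *fd; };

/* After pushing a chunk into the fastbin with index IDX, verify that
   the previous top OLD belonged to the same bin.  Dereferencing OLD is
   only safe with the lock held, hence the HAVE_LOCK guard.  */
static int
fastbin_top_consistent (int have_lock, struct chunk *old, unsigned int idx)
{
  return !have_lock || old == NULL
         || fastbin_index (chunksize (old)) == idx;
}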
@@ -4185,6 +4195,11 @@ _int_free (mstate av, mchunkptr p, int have_lock)
   */
 
   else if (!chunk_is_mmapped(p)) {
+
+    /* If we're single-threaded, don't lock the arena.  */
+    if (SINGLE_THREAD_P)
+      have_lock = true;
+
     if (!have_lock)
       __libc_lock_lock (av->mutex);
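The second hunk applies the same idea to the consolidation path: claiming the lock is already held skips both this __libc_lock_lock and the matching unlock at the end of _int_free. A minimal sketch of the pattern with pthreads, where process_is_single_threaded and free_consolidate are illustrative stand-ins, not glibc names:

#include <pthread.h>
#include <stdbool.h>

/* Assumed tracked elsewhere, e.g. cleared on pthread_create.  */
static bool process_is_single_threaded = true;

static void
free_consolidate (pthread_mutex_t *arena_lock, bool have_lock)
{
  /* With only one thread there is nothing to race against, so pretend
     the lock is held; both the lock below and the matching unlock at
     the end are then skipped.  */
  if (process_is_single_threaded)
    have_lock = true;

  if (!have_lock)
    pthread_mutex_lock (arena_lock);

  /* ... coalesce the freed chunk with its neighbours ... */

  if (!have_lock)
    pthread_mutex_unlock (arena_lock);
}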