path: root/malloc/malloc.c
author	Florian Weimer <fweimer@redhat.com>	2016-09-06 12:49:54 +0200
committer	Florian Weimer <fweimer@redhat.com>	2016-09-06 12:49:54 +0200
commit	4bf5f2224baa1590f92f7a26930928fe9f7e4b57 (patch)
tree	d8a3457318df3733dfefc9476eb210288beba6d0 /malloc/malloc.c
parent	6d5336211d2e823d4d431a01e62a80d9be4cbc9d (diff)
malloc: Automated part of conversion to __libc_lock
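The mechanical change replaces direct mutex_lock/mutex_unlock calls on arena mutexes with the __libc_lock_lock/__libc_lock_unlock macros from glibc's internal <libc-lock.h>. Those macros take the lock object itself rather than its address, which is why every converted call site also drops the leading '&'. The program below is only a sketch of that call pattern, not glibc's real macro definitions: the pthread-based stand-ins and the simplified arena structure are assumptions made so the example builds on its own.

  /* Illustrative only: the real __libc_lock_* macros live in glibc's
     internal <libc-lock.h> and are not a public API.  The pthread-based
     definitions and the arena structure below are stand-ins so the call
     pattern can be shown in a self-contained program.  */
  #include <pthread.h>
  #include <stddef.h>
  #include <stdio.h>

  #define __libc_lock_lock(NAME)   pthread_mutex_lock (&(NAME))
  #define __libc_lock_unlock(NAME) pthread_mutex_unlock (&(NAME))

  /* Simplified stand-in for malloc's arena (mstate) with an embedded lock.  */
  struct arena
  {
    pthread_mutex_t mutex;
    size_t system_mem;
  };

  static struct arena main_arena = { PTHREAD_MUTEX_INITIALIZER, 0 };

  int
  main (void)
  {
    struct arena *ar_ptr = &main_arena;

    /* The macros take the lock object, not a pointer to it, which is
       why the patch turns '&ar_ptr->mutex' into 'ar_ptr->mutex'.  */
    __libc_lock_lock (ar_ptr->mutex);
    ar_ptr->system_mem += 4096;
    __libc_lock_unlock (ar_ptr->mutex);

    printf ("system_mem = %zu\n", main_arena.system_mem);
    return 0;
  }

Inside glibc the macros expand to the low-level lock primitives of the configured threading implementation, so this automated pass is intended to change spelling, not behavior.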
Diffstat (limited to 'malloc/malloc.c')
-rw-r--r--	malloc/malloc.c	40
1 file changed, 20 insertions, 20 deletions
diff --git a/malloc/malloc.c b/malloc/malloc.c
index bb52b3e177..20acb73c3e 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -2870,7 +2870,7 @@ __libc_malloc (size_t bytes)
}
if (ar_ptr != NULL)
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
ar_ptr == arena_for_chunk (mem2chunk (victim)));
@@ -3012,11 +3012,11 @@ __libc_realloc (void *oldmem, size_t bytes)
return newmem;
}
- (void) mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
ar_ptr == arena_for_chunk (mem2chunk (newp)));
@@ -3098,7 +3098,7 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
}
if (ar_ptr != NULL)
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
ar_ptr == arena_for_chunk (mem2chunk (p)));
@@ -3219,7 +3219,7 @@ __libc_calloc (size_t n, size_t elem_size)
}
if (av != NULL)
- (void) mutex_unlock (&av->mutex);
+ __libc_lock_unlock (av->mutex);
/* Allocation failed even after a retry. */
if (mem == 0)
@@ -3835,7 +3835,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
errstr = "free(): invalid pointer";
errout:
if (!have_lock && locked)
- (void) mutex_unlock (&av->mutex);
+ __libc_lock_unlock (av->mutex);
malloc_printerr (check_action, errstr, chunk2mem (p), av);
return;
}
@@ -3874,7 +3874,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
after getting the lock. */
if (have_lock
|| ({ assert (locked == 0);
- mutex_lock(&av->mutex);
+ __libc_lock_lock (av->mutex);
locked = 1;
chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
|| chunksize (chunk_at_offset (p, size)) >= av->system_mem;
@@ -3885,7 +3885,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
}
if (! have_lock)
{
- (void)mutex_unlock(&av->mutex);
+ __libc_lock_unlock (av->mutex);
locked = 0;
}
}
@@ -3931,7 +3931,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
else if (!chunk_is_mmapped(p)) {
if (! have_lock) {
- (void)mutex_lock(&av->mutex);
+ __libc_lock_lock (av->mutex);
locked = 1;
}
@@ -4064,7 +4064,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
if (! have_lock) {
assert (locked);
- (void)mutex_unlock(&av->mutex);
+ __libc_lock_unlock (av->mutex);
}
}
/*
@@ -4531,9 +4531,9 @@ __malloc_trim (size_t s)
mstate ar_ptr = &main_arena;
do
{
- (void) mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
result |= mtrim (ar_ptr, s);
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
ar_ptr = ar_ptr->next;
}
@@ -4662,9 +4662,9 @@ __libc_mallinfo (void)
ar_ptr = &main_arena;
do
{
- (void) mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
int_mallinfo (ar_ptr, &m);
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
ar_ptr = ar_ptr->next;
}
@@ -4694,7 +4694,7 @@ __malloc_stats (void)
struct mallinfo mi;
memset (&mi, 0, sizeof (mi));
- (void) mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
int_mallinfo (ar_ptr, &mi);
fprintf (stderr, "Arena %d:\n", i);
fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
@@ -4705,7 +4705,7 @@ __malloc_stats (void)
#endif
system_b += mi.arena;
in_use_b += mi.uordblks;
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if (ar_ptr == &main_arena)
break;
@@ -4733,7 +4733,7 @@ __libc_mallopt (int param_number, int value)
if (__malloc_initialized < 0)
ptmalloc_init ();
- (void) mutex_lock (&av->mutex);
+ __libc_lock_lock (av->mutex);
/* Ensure initialization/consolidation */
malloc_consolidate (av);
@@ -4811,7 +4811,7 @@ __libc_mallopt (int param_number, int value)
}
break;
}
- (void) mutex_unlock (&av->mutex);
+ __libc_lock_unlock (av->mutex);
return res;
}
libc_hidden_def (__libc_mallopt)
@@ -5058,7 +5058,7 @@ __malloc_info (int options, FILE *fp)
} sizes[NFASTBINS + NBINS - 1];
#define nsizes (sizeof (sizes) / sizeof (sizes[0]))
- mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
for (size_t i = 0; i < NFASTBINS; ++i)
{
@@ -5117,7 +5117,7 @@ __malloc_info (int options, FILE *fp)
avail += sizes[NFASTBINS - 1 + i].total;
}
- mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
total_nfastblocks += nfastblocks;
total_fastavail += fastavail;