author    Ulrich Drepper <drepper@gmail.com>  2011-09-10 21:47:36 -0400
committer Ulrich Drepper <drepper@gmail.com>  2011-09-10 21:47:36 -0400
commit    02d46fc4b969e25e4ba0c54aa95fa98d7279bd05 (patch)
tree      8d0fc5bfaeac42091551da91a1f992ff656f1f56 /malloc
parent    22a89187139a9083ca73989bfd11597e0f85cb61 (diff)
Simplify malloc initialization
Significantly reduce the code needed at malloc initialization. In the process, getpagesize is simplified by always initializing GLRO(dl_pagesize).
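The heart of the change is replacing run-time setup (the removed ptmalloc_init_minimal and the mutex_init/list setup in ptmalloc_init) with static designated initializers, so the allocator state is already valid before any initialization code runs. Below is a minimal sketch of that pattern in plain C; the struct fields and the numeric defaults are illustrative stand-ins, not the real glibc malloc_par/malloc_state definitions or their actual default values.

/* Sketch of static initialization replacing a run-time init function.
   Field names and values are assumptions for illustration only. */
#include <stdio.h>
#include <pthread.h>

struct params {
  size_t top_pad;
  size_t mmap_threshold;
  size_t trim_threshold;
};

struct arena {
  pthread_mutex_t mutex;
  struct arena *next;            /* circular list of arenas */
};

/* Before: an init function copied these defaults in on first use.
   After: designated initializers make the objects usable from the start,
   leaving the init path to handle only environment overrides. */
static struct params mp_ = {
  .top_pad        = 0,           /* illustrative default */
  .mmap_threshold = 128 * 1024,  /* illustrative default */
  .trim_threshold = 128 * 1024,  /* illustrative default */
};

static struct arena main_arena = {
  .mutex = PTHREAD_MUTEX_INITIALIZER,
  .next  = &main_arena,          /* self-reference is a valid constant
                                    initializer for a static object */
};

int main (void)
{
  printf ("mmap threshold: %zu\n", mp_.mmap_threshold);
  printf ("arena list circular: %d\n", main_arena.next == &main_arena);
  return 0;
}

The same idea shows up twice in the patch: main_arena and mp_ gain static initializers, and the per-process page size is no longer cached in mp_.pagesize but read from the already-initialized GLRO(dl_pagesize).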
Diffstat (limited to 'malloc')
-rw-r--r--  malloc/arena.c   48
-rw-r--r--  malloc/hooks.c    4
-rw-r--r--  malloc/malloc.c  69
3 files changed, 43 insertions, 78 deletions
diff --git a/malloc/arena.c b/malloc/arena.c
index 6d5114bb82..9114fd28e9 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -76,9 +76,9 @@ extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
/* Thread specific data */
static tsd_key_t arena_key;
-static mutex_t list_lock;
+static mutex_t list_lock = MUTEX_INITIALIZER;
#ifdef PER_THREAD
-static size_t narenas;
+static size_t narenas = 1;
static mstate free_list;
#endif
@@ -353,24 +353,6 @@ next_env_entry (char ***position)
return result;
}
-/* Set up basic state so that _int_malloc et al can work. */
-static void
-ptmalloc_init_minimal (void)
-{
-#if DEFAULT_TOP_PAD != 0
- mp_.top_pad = DEFAULT_TOP_PAD;
-#endif
- mp_.n_mmaps_max = DEFAULT_MMAP_MAX;
- mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
- mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
- mp_.pagesize = malloc_getpagesize;
-#ifdef PER_THREAD
-# define NARENAS_FROM_NCORES(n) ((n) * (sizeof(long) == 4 ? 2 : 8))
- mp_.arena_test = NARENAS_FROM_NCORES (1);
- narenas = 1;
-#endif
-}
-
#ifdef SHARED
static void *
@@ -386,17 +368,9 @@ libc_hidden_proto (_dl_open_hook);
static void
ptmalloc_init (void)
{
- const char* s;
- int secure = 0;
-
if(__malloc_initialized >= 0) return;
__malloc_initialized = 0;
- ptmalloc_init_minimal();
-
- mutex_init(&main_arena.mutex);
- main_arena.next = &main_arena;
-
#ifdef SHARED
/* In case this libc copy is in a non-default namespace, never use brk.
Likewise if dlopened from statically linked program. */
@@ -409,12 +383,10 @@ ptmalloc_init (void)
__morecore = __failing_morecore;
#endif
- mutex_init(&list_lock);
tsd_key_create(&arena_key, NULL);
tsd_setspecific(arena_key, (void *)&main_arena);
thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
- secure = __libc_enable_secure;
- s = NULL;
+ const char *s = NULL;
if (__builtin_expect (_environ != NULL, 1))
{
char **runp = _environ;
@@ -438,7 +410,7 @@ ptmalloc_init (void)
s = &envline[7];
break;
case 8:
- if (! secure)
+ if (! __builtin_expect (__libc_enable_secure, 0))
{
if (memcmp (envline, "TOP_PAD_", 8) == 0)
mALLOPt(M_TOP_PAD, atoi(&envline[9]));
@@ -447,7 +419,7 @@ ptmalloc_init (void)
}
break;
case 9:
- if (! secure)
+ if (! __builtin_expect (__libc_enable_secure, 0))
{
if (memcmp (envline, "MMAP_MAX_", 9) == 0)
mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
@@ -459,7 +431,7 @@ ptmalloc_init (void)
break;
#ifdef PER_THREAD
case 10:
- if (! secure)
+ if (! __builtin_expect (__libc_enable_secure, 0))
{
if (memcmp (envline, "ARENA_TEST", 10) == 0)
mALLOPt(M_ARENA_TEST, atoi(&envline[11]));
@@ -467,7 +439,7 @@ ptmalloc_init (void)
break;
#endif
case 15:
- if (! secure)
+ if (! __builtin_expect (__libc_enable_secure, 0))
{
if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
@@ -549,7 +521,7 @@ static heap_info *
internal_function
new_heap(size_t size, size_t top_pad)
{
- size_t page_mask = malloc_getpagesize - 1;
+ size_t page_mask = GLRO(dl_pagesize) - 1;
char *p1, *p2;
unsigned long ul;
heap_info *h;
@@ -619,7 +591,7 @@ new_heap(size_t size, size_t top_pad)
static int
grow_heap(heap_info *h, long diff)
{
- size_t page_mask = malloc_getpagesize - 1;
+ size_t page_mask = GLRO(dl_pagesize) - 1;
long new_size;
diff = (diff + page_mask) & ~page_mask;
@@ -679,7 +651,7 @@ internal_function
heap_trim(heap_info *heap, size_t pad)
{
mstate ar_ptr = heap->ar_ptr;
- unsigned long pagesz = mp_.pagesize;
+ unsigned long pagesz = GLRO(dl_pagesize);
mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
heap_info *prev_heap;
long new_size, top_size, extra;
diff --git a/malloc/hooks.c b/malloc/hooks.c
index a0793a6316..7817ae9d4a 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -147,7 +147,7 @@ mem2chunk_check(void* mem, unsigned char **magic_p)
if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
}
} else {
- unsigned long offset, page_mask = malloc_getpagesize-1;
+ unsigned long offset, page_mask = GLRO(dl_pagesize)-1;
/* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
alignment relative to the beginning of a page. Check this
@@ -182,7 +182,7 @@ top_check(void)
mchunkptr t = top(&main_arena);
char* brk, * new_brk;
INTERNAL_SIZE_T front_misalign, sbrk_size;
- unsigned long pagesz = malloc_getpagesize;
+ unsigned long pagesz = GLRO(dl_pagesize);
if (t == initial_top(&main_arena) ||
(!chunk_is_mmapped(t) &&
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 0683eee2a1..3cff6d84d6 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -170,7 +170,6 @@
Compilation Environment options:
HAVE_MREMAP 0 unless linux defined
- malloc_getpagesize derived from system #includes, or 4096 if not
Changing default word sizes:
@@ -226,6 +225,8 @@
#include <bits/wordsize.h>
#include <sys/sysinfo.h>
+#include <ldsodefs.h>
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -454,7 +455,6 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
#define public_iCOMALLOc __libc_independent_comalloc
#define public_gET_STATe __malloc_get_state
#define public_sET_STATe __malloc_set_state
-#define malloc_getpagesize __getpagesize()
#define open __open
#define mmap __mmap
#define munmap __munmap
@@ -589,24 +589,6 @@ void *(*__morecore)(ptrdiff_t) = __default_morecore;
/*
- The system page size. To the extent possible, this malloc manages
- memory from the system in page-size units. Note that this value is
- cached during initialization into a field of malloc_state. So even
- if malloc_getpagesize is a function, it is only called once.
-
- The following mechanics for getpagesize were adapted from bsd/gnu
- getpagesize.h. If none of the system-probes here apply, a value of
- 4096 is used, which should be OK: If they don't apply, then using
- the actual value probably doesn't impact performance.
-*/
-
-
-#ifndef malloc_getpagesize
-# include <unistd.h>
-# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
-#endif
-
-/*
This version of malloc supports the standard SVID/XPG mallinfo
routine that returns a struct containing usage properties and
statistics. It should work on any SVID/XPG compliant system that has
@@ -1878,9 +1860,6 @@ struct malloc_par {
dynamic behavior. */
int no_dyn_threshold;
- /* Cache malloc_getpagesize */
- unsigned int pagesize;
-
/* Statistics */
INTERNAL_SIZE_T mmapped_mem;
/*INTERNAL_SIZE_T sbrked_mem;*/
@@ -1898,11 +1877,25 @@ struct malloc_par {
before using. This malloc relies on the property that malloc_state
is initialized to all zeroes (as is true of C statics). */
-static struct malloc_state main_arena;
+static struct malloc_state main_arena =
+ {
+ .mutex = MUTEX_INITIALIZER,
+ .next = &main_arena
+ };
/* There is only one instance of the malloc parameters. */
-static struct malloc_par mp_;
+static struct malloc_par mp_ =
+ {
+ .top_pad = DEFAULT_TOP_PAD,
+ .n_mmaps_max = DEFAULT_MMAP_MAX,
+ .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
+ .trim_threshold = DEFAULT_TRIM_THRESHOLD,
+#ifdef PER_THREAD
+# define NARENAS_FROM_NCORES(n) ((n) * (sizeof(long) == 4 ? 2 : 8))
+ .arena_test = NARENAS_FROM_NCORES (1)
+#endif
+ };
#ifdef PER_THREAD
@@ -2070,7 +2063,7 @@ static void do_check_chunk(mstate av, mchunkptr p)
assert(((char*)p) < min_address || ((char*)p) >= max_address);
}
/* chunk is page-aligned */
- assert(((p->prev_size + sz) & (mp_.pagesize-1)) == 0);
+ assert(((p->prev_size + sz) & (GLRO(dl_pagesize)-1)) == 0);
/* mem is aligned */
assert(aligned_OK(chunk2mem(p)));
}
@@ -2231,7 +2224,7 @@ static void do_check_malloc_state(mstate av)
return;
/* pagesize is a power of 2 */
- assert((mp_.pagesize & (mp_.pagesize-1)) == 0);
+ assert((GLRO(dl_pagesize) & (GLRO(dl_pagesize)-1)) == 0);
/* A contiguous main_arena is consistent with sbrk_base. */
if (av == &main_arena && contiguous(av))
@@ -2389,7 +2382,7 @@ static void* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
unsigned long sum; /* for updating stats */
- size_t pagemask = mp_.pagesize - 1;
+ size_t pagemask = GLRO(dl_pagesize) - 1;
bool tried_mmap = false;
@@ -2804,7 +2797,7 @@ static int sYSTRIm(size_t pad, mstate av)
char* new_brk; /* address returned by post-check sbrk call */
size_t pagesz;
- pagesz = mp_.pagesize;
+ pagesz = GLRO(dl_pagesize);
top_size = chunksize(av->top);
/* Release in pagesize units, keeping at least one page */
@@ -2867,7 +2860,7 @@ munmap_chunk(mchunkptr p)
page size. But gcc does not recognize the optimization possibility
(in the moment at least) so we combine the two values into one before
the bit test. */
- if (__builtin_expect (((block | total_size) & (mp_.pagesize - 1)) != 0, 0))
+ if (__builtin_expect (((block | total_size) & (GLRO(dl_pagesize) - 1)) != 0, 0))
{
malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
chunk2mem (p));
@@ -2889,13 +2882,13 @@ static mchunkptr
internal_function
mremap_chunk(mchunkptr p, size_t new_size)
{
- size_t page_mask = mp_.pagesize - 1;
+ size_t page_mask = GLRO(dl_pagesize) - 1;
INTERNAL_SIZE_T offset = p->prev_size;
INTERNAL_SIZE_T size = chunksize(p);
char *cp;
assert (chunk_is_mmapped(p));
- assert(((size + offset) & (mp_.pagesize-1)) == 0);
+ assert(((size + offset) & (GLRO(dl_pagesize)-1)) == 0);
/* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
@@ -3157,7 +3150,7 @@ public_vALLOc(size_t bytes)
if(__malloc_initialized < 0)
ptmalloc_init ();
- size_t pagesz = mp_.pagesize;
+ size_t pagesz = GLRO(dl_pagesize);
__malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
__const __malloc_ptr_t)) =
@@ -3201,8 +3194,8 @@ public_pVALLOc(size_t bytes)
if(__malloc_initialized < 0)
ptmalloc_init ();
- size_t pagesz = mp_.pagesize;
- size_t page_mask = mp_.pagesize - 1;
+ size_t pagesz = GLRO(dl_pagesize);
+ size_t page_mask = GLRO(dl_pagesize) - 1;
size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
__malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
@@ -4582,7 +4575,7 @@ _int_valloc(mstate av, size_t bytes)
{
/* Ensure initialization/consolidation */
if (have_fastchunks(av)) malloc_consolidate(av);
- return _int_memalign(av, mp_.pagesize, bytes);
+ return _int_memalign(av, GLRO(dl_pagesize), bytes);
}
/*
@@ -4597,7 +4590,7 @@ _int_pvalloc(mstate av, size_t bytes)
/* Ensure initialization/consolidation */
if (have_fastchunks(av)) malloc_consolidate(av);
- pagesz = mp_.pagesize;
+ pagesz = GLRO(dl_pagesize);
return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
}
@@ -4611,7 +4604,7 @@ static int mTRIm(mstate av, size_t pad)
/* Ensure initialization/consolidation */
malloc_consolidate (av);
- const size_t ps = mp_.pagesize;
+ const size_t ps = GLRO(dl_pagesize);
int psindex = bin_index (ps);
const size_t psm1 = ps - 1;