Diffstat (limited to 'malloc/malloc.c')
-rw-r--r--  malloc/malloc.c  55
1 file changed, 46 insertions(+), 9 deletions(-)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index ea97df2cb4..44524ff984 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1758,6 +1758,17 @@ static struct malloc_state main_arena =
.attached_threads = 1
};
+/* These variables are used for undumping support. Chunks are marked
+ as using mmap, but we leave them alone if they fall into this
+ range. */
+static mchunkptr dumped_main_arena_start; /* Inclusive. */
+static mchunkptr dumped_main_arena_end; /* Exclusive. */
+
+/* True if the pointer falls into the dumped arena. Use this after
+ chunk_is_mmapped indicates a chunk is mmapped. */
+#define DUMPED_MAIN_ARENA_CHUNK(p) \
+ ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
+
/* There is only one instance of the malloc parameters. */
static struct malloc_par mp_ =
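The bounds above form a half-open range: the start is inclusive, the end exclusive. Here is a minimal standalone C sketch, separate from the patch itself, of how DUMPED_MAIN_ARENA_CHUNK classifies pointers; the simplified chunk struct and the stand-in array playing the role of a dumped heap are hypothetical, not glibc code:

#include <stdio.h>
#include <stddef.h>

typedef struct malloc_chunk *mchunkptr;
struct malloc_chunk { size_t prev_size; size_t size; };

static mchunkptr dumped_main_arena_start;  /* Inclusive.  */
static mchunkptr dumped_main_arena_end;    /* Exclusive.  */

#define DUMPED_MAIN_ARENA_CHUNK(p) \
  ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)

int
main (void)
{
  static struct malloc_chunk dumped[4];  /* Hypothetical dumped heap.  */
  dumped_main_arena_start = &dumped[0];
  dumped_main_arena_end = &dumped[4];    /* One past the last chunk.  */

  printf ("%d\n", DUMPED_MAIN_ARENA_CHUNK (&dumped[2]));  /* 1: inside the range  */
  printf ("%d\n", DUMPED_MAIN_ARENA_CHUNK (&dumped[4]));  /* 0: end is exclusive  */
  return 0;
}

With a half-open range, the zero-initialized default (both pointers NULL) already classifies nothing as dumped, so no separate flag is needed to mark the absence of a dumped arena.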
@@ -1947,7 +1958,7 @@ do_check_chunk (mstate av, mchunkptr p)
assert (prev_inuse (p));
}
}
- else
+ else if (!DUMPED_MAIN_ARENA_CHUNK (p))
{
/* address is outside main heap */
if (contiguous (av) && av->top != initial_top (av))
@@ -2822,6 +2833,11 @@ munmap_chunk (mchunkptr p)
assert (chunk_is_mmapped (p));
+ /* Do nothing if the chunk is a faked mmapped chunk in the dumped
+ main arena. We never free this memory. */
+ if (DUMPED_MAIN_ARENA_CHUNK (p))
+ return;
+
uintptr_t block = (uintptr_t) p - p->prev_size;
size_t total_size = p->prev_size + size;
/* Unfortunately we have to do the compiler's job by hand here. Normally
@@ -2942,10 +2958,12 @@ __libc_free (void *mem)
if (chunk_is_mmapped (p)) /* release mmapped memory. */
{
- /* see if the dynamic brk/mmap threshold needs adjusting */
+ /* See if the dynamic brk/mmap threshold needs adjusting.
+ Dumped fake mmapped chunks do not affect the threshold. */
if (!mp_.no_dyn_threshold
&& p->size > mp_.mmap_threshold
- && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
+ && p->size <= DEFAULT_MMAP_THRESHOLD_MAX
+ && !DUMPED_MAIN_ARENA_CHUNK (p))
{
mp_.mmap_threshold = chunksize (p);
mp_.trim_threshold = 2 * mp_.mmap_threshold;
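The threshold ratcheting itself is unchanged by this hunk; the new condition only keeps dumped fake chunks from influencing it. A standalone sketch of that behaviour follows, with assumed starting values and a made-up helper name (note_mmapped_free); it is a simplified model, not the glibc implementation:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* Assumed illustrative values; glibc computes its own defaults.  */
static size_t mmap_threshold = 128 * 1024;
static size_t trim_threshold = 256 * 1024;
static const size_t mmap_threshold_max = 4 * 1024 * 1024;

/* Model of the free path: a real mmapped chunk of CHUNK_SIZE bytes
   ratchets the threshold up so similar requests are served from the
   heap next time; a dumped fake chunk is ignored.  */
static void
note_mmapped_free (size_t chunk_size, bool from_dumped_arena)
{
  if (chunk_size > mmap_threshold
      && chunk_size <= mmap_threshold_max
      && !from_dumped_arena)
    {
      mmap_threshold = chunk_size;
      trim_threshold = 2 * mmap_threshold;
    }
}

int
main (void)
{
  note_mmapped_free (512 * 1024, true);   /* Dumped chunk: no effect.  */
  note_mmapped_free (512 * 1024, false);  /* Real chunk: raises it.  */
  printf ("mmap threshold: %zu, trim threshold: %zu\n",
          mmap_threshold, trim_threshold);
  return 0;
}

Presumably the exclusion exists because the sizes of dumped chunks describe the layout of the old dumped heap, not the program's current allocation pattern.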
@@ -2995,12 +3013,15 @@ __libc_realloc (void *oldmem, size_t bytes)
else
ar_ptr = arena_for_chunk (oldp);
- /* Little security check which won't hurt performance: the
- allocator never wrapps around at the end of the address space.
- Therefore we can exclude some size values which might appear
- here by accident or by "design" from some intruder. */
- if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
- || __builtin_expect (misaligned_chunk (oldp), 0))
+ /* Little security check which won't hurt performance: the allocator
+ never wraps around at the end of the address space. Therefore
+ we can exclude some size values which might appear here by
+ accident or by "design" from some intruder. We need to bypass
+ this check for dumped fake mmap chunks from the old main arena
+ because the new malloc may provide additional alignment. */
+ if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
+ || __builtin_expect (misaligned_chunk (oldp), 0))
+ && !DUMPED_MAIN_ARENA_CHUNK (oldp))
{
malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
ar_ptr);
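The wraparound test in that check is compact; the following standalone restatement (illustrative only, with a made-up helper name) spells out what it rejects:

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/* A chunk at address P with size SIZE is impossible if P + SIZE would
   wrap past the top of the address space: (uintptr_t) -size is the
   largest address at which a SIZE-byte object can still end in bounds.  */
static bool
chunk_wraps_address_space (const void *p, size_t size)
{
  return (uintptr_t) p > (uintptr_t) -size;
}

For dumped chunks the bypass matters because, as the new comment says, the new malloc may provide additional alignment, so misaligned_chunk could otherwise reject chunks laid out by the malloc that produced the dump.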
@@ -3011,6 +3032,22 @@ __libc_realloc (void *oldmem, size_t bytes)
if (chunk_is_mmapped (oldp))
{
+ /* If this is a faked mmapped chunk from the dumped main arena,
+ always make a copy (and do not free the old chunk). */
+ if (DUMPED_MAIN_ARENA_CHUNK (oldp))
+ {
+ /* Must alloc, copy, free. */
+ void *newmem = __libc_malloc (bytes);
+ if (newmem == 0)
+ return NULL;
+ /* Copy as many bytes as are available from the old chunk
+ and fit into the new size. */
+ if (bytes > oldsize - 2 * SIZE_SZ)
+ bytes = oldsize - 2 * SIZE_SZ;
+ memcpy (newmem, oldmem, bytes);
+ return newmem;
+ }
+
void *newmem;
#if HAVE_MREMAP
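To close out, here is a standalone sketch of the alloc-copy-keep behaviour added for dumped chunks in __libc_realloc above. It uses plain malloc/memcpy rather than the internal entry points and a simplified notion of header overhead, so it is illustrative only:

#include <stdlib.h>
#include <string.h>

#define SIZE_SZ (sizeof (size_t))

/* Reallocating a dumped fake mmapped chunk: always allocate fresh
   memory, copy whatever fits of the old payload, and leave the old
   dumped block untouched (it is never freed or remapped).  OLDSIZE is
   the old chunk size including its header overhead.  */
static void *
realloc_dumped_chunk (void *oldmem, size_t oldsize, size_t bytes)
{
  void *newmem = malloc (bytes);
  if (newmem == NULL)
    return NULL;
  if (bytes > oldsize - 2 * SIZE_SZ)
    bytes = oldsize - 2 * SIZE_SZ;
  memcpy (newmem, oldmem, bytes);
  return newmem;
}

The old dumped block is left in place, matching the early return added to munmap_chunk earlier in the patch.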