author     Ulrich Drepper <drepper@redhat.com>  2004-11-13 05:43:36 +0000
committer  Ulrich Drepper <drepper@redhat.com>  2004-11-13 05:43:36 +0000
commit     37fa1953d539cc34c33b48bc66a64f5c01dbc1fd (patch)
tree       f4691635b5f6d3dec57f28abad2aa8b6fc67e667
parent     a9055cab7ac609b96984179194c7a2ec410c8e2b (diff)
Update.
	* malloc/malloc.c (_int_free): Remove test for NULL parameter.
	(_int_realloc): Call _int_free only if memory parameter is not NULL.
-rw-r--r--  ChangeLog                              |   3
-rw-r--r--  malloc/malloc.c                        | 308
-rw-r--r--  sysdeps/posix/libc_fatal.c             |   2
-rw-r--r--  sysdeps/unix/sysv/linux/libc_fatal.c   |   2
4 files changed, 160 insertions, 155 deletions
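The malloc.c hunk below is mostly a re-indent: the body of _int_free loses its
enclosing `if (mem != 0)` wrapper. The standard guarantee that free(NULL) is a
no-op still holds because the public entry point tests for NULL before the
internal routine is ever reached. A minimal sketch of that division of labor,
using simplified stand-in names (my_free and internal_free are not the glibc
identifiers):

#include <stddef.h>

static void internal_free (void *mem)
{
  /* Post-patch precondition: mem is never NULL here; the test has been
     hoisted into every caller.  */
  /* ... chunk bookkeeping elided ... */
  (void) mem;
}

void my_free (void *mem)
{
  if (mem == NULL)        /* free(NULL) has no effect (C99 7.20.3.2) */
    return;
  internal_free (mem);
}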
diff --git a/ChangeLog b/ChangeLog
index 1888ce03c0..21c8666aa4 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,8 @@
2004-11-12 Ulrich Drepper <drepper@redhat.com>
+ * malloc/malloc.c (_int_free): Remove test for NULL parameter.
+ (_int_realloc): Call _int_free only if memory parameter is not NULL.
+
* sysdeps/unix/sysv/linux/libc_fatal.c: Add new function __libc_message
which performs the printing and simple format string handling. The
string is written to tty, stderr, syslog in this order, stopping after
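The entry above describes __libc_message's output strategy: the message is
tried on the tty, then stderr, then syslog, stopping at the first success.
A hedged sketch of that idea in plain C; report_fatal and its structure are
illustrative, not the glibc implementation:

#include <fcntl.h>
#include <string.h>
#include <syslog.h>
#include <unistd.h>

static void report_fatal (const char *msg)
{
  size_t len = strlen (msg);

  /* 1. Try the controlling terminal.  */
  int fd = open ("/dev/tty", O_RDWR | O_NOCTTY);
  if (fd >= 0)
    {
      ssize_t n = write (fd, msg, len);
      close (fd);
      if (n == (ssize_t) len)
        return;                        /* stop after first success */
    }

  /* 2. Fall back to stderr.  */
  if (write (STDERR_FILENO, msg, len) == (ssize_t) len)
    return;

  /* 3. Last resort: syslog.  */
  syslog (LOG_ERR, "%s", msg);
}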
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 3f4fd77807..d2112b159a 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4196,191 +4196,188 @@ _int_free(mstate av, Void_t* mem)
mchunkptr fwd; /* misc temp for linking */
- /* free(0) has no effect */
- if (mem != 0) {
- const char *errstr = NULL;
+ const char *errstr = NULL;
- p = mem2chunk(mem);
- size = chunksize(p);
+ p = mem2chunk(mem);
+ size = chunksize(p);
- /* Little security check which won't hurt performance: the
- allocator never wrapps around at the end of the address space.
- Therefore we can exclude some size values which might appear
- here by accident or by "design" from some intruder. */
- if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0))
- {
- errstr = "free(): invalid pointer";
- errout:
- malloc_printerr (check_action, errstr, mem);
- return;
- }
+ /* Little security check which won't hurt performance: the
+ allocator never wrapps around at the end of the address space.
+ Therefore we can exclude some size values which might appear
+ here by accident or by "design" from some intruder. */
+ if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0))
+ {
+ errstr = "free(): invalid pointer";
+ errout:
+ malloc_printerr (check_action, errstr, mem);
+ return;
+ }
- check_inuse_chunk(av, p);
+ check_inuse_chunk(av, p);
- /*
- If eligible, place chunk on a fastbin so it can be found
- and used quickly in malloc.
- */
+ /*
+ If eligible, place chunk on a fastbin so it can be found
+ and used quickly in malloc.
+ */
- if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
+ if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
#if TRIM_FASTBINS
- /*
- If TRIM_FASTBINS set, don't place chunks
- bordering top into fastbins
- */
- && (chunk_at_offset(p, size) != av->top)
-#endif
- ) {
-
- set_fastchunks(av);
- fb = &(av->fastbins[fastbin_index(size)]);
- /* Another simple check: make sure the top of the bin is not the
- record we are going to add (i.e., double free). */
- if (__builtin_expect (*fb == p, 0))
- {
- errstr = "double free or corruption (fasttop)";
- goto errout;
- }
- p->fd = *fb;
- *fb = p;
- }
-
- /*
- Consolidate other non-mmapped chunks as they arrive.
- */
+ /*
+ If TRIM_FASTBINS set, don't place chunks
+ bordering top into fastbins
+ */
+ && (chunk_at_offset(p, size) != av->top)
+#endif
+ ) {
- else if (!chunk_is_mmapped(p)) {
- nextchunk = chunk_at_offset(p, size);
+ set_fastchunks(av);
+ fb = &(av->fastbins[fastbin_index(size)]);
+ /* Another simple check: make sure the top of the bin is not the
+ record we are going to add (i.e., double free). */
+ if (__builtin_expect (*fb == p, 0))
+ {
+ errstr = "double free or corruption (fasttop)";
+ goto errout;
+ }
+ p->fd = *fb;
+ *fb = p;
+ }
- /* Lightweight tests: check whether the block is already the
- top block. */
- if (__builtin_expect (p == av->top, 0))
- {
- errstr = "double free or corruption (top)";
- goto errout;
- }
- /* Or whether the next chunk is beyond the boundaries of the arena. */
- if (__builtin_expect (contiguous (av)
- && (char *) nextchunk
- >= ((char *) av->top + chunksize(av->top)), 0))
- {
- errstr = "double free or corruption (out)";
- goto errout;
- }
- /* Or whether the block is actually not marked used. */
- if (__builtin_expect (!prev_inuse(nextchunk), 0))
- {
- errstr = "double free or corruption (!prev)";
- goto errout;
- }
+ /*
+ Consolidate other non-mmapped chunks as they arrive.
+ */
- nextsize = chunksize(nextchunk);
- assert(nextsize > 0);
+ else if (!chunk_is_mmapped(p)) {
+ nextchunk = chunk_at_offset(p, size);
- /* consolidate backward */
- if (!prev_inuse(p)) {
- prevsize = p->prev_size;
- size += prevsize;
- p = chunk_at_offset(p, -((long) prevsize));
- unlink(p, bck, fwd);
+ /* Lightweight tests: check whether the block is already the
+ top block. */
+ if (__builtin_expect (p == av->top, 0))
+ {
+ errstr = "double free or corruption (top)";
+ goto errout;
+ }
+ /* Or whether the next chunk is beyond the boundaries of the arena. */
+ if (__builtin_expect (contiguous (av)
+ && (char *) nextchunk
+ >= ((char *) av->top + chunksize(av->top)), 0))
+ {
+ errstr = "double free or corruption (out)";
+ goto errout;
+ }
+ /* Or whether the block is actually not marked used. */
+ if (__builtin_expect (!prev_inuse(nextchunk), 0))
+ {
+ errstr = "double free or corruption (!prev)";
+ goto errout;
}
- if (nextchunk != av->top) {
- /* get and clear inuse bit */
- nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
-
- /* consolidate forward */
- if (!nextinuse) {
- unlink(nextchunk, bck, fwd);
- size += nextsize;
- } else
- clear_inuse_bit_at_offset(nextchunk, 0);
-
- /*
- Place the chunk in unsorted chunk list. Chunks are
- not placed into regular bins until after they have
- been given one chance to be used in malloc.
- */
+ nextsize = chunksize(nextchunk);
+ assert(nextsize > 0);
- bck = unsorted_chunks(av);
- fwd = bck->fd;
- p->bk = bck;
- p->fd = fwd;
- bck->fd = p;
- fwd->bk = p;
+ /* consolidate backward */
+ if (!prev_inuse(p)) {
+ prevsize = p->prev_size;
+ size += prevsize;
+ p = chunk_at_offset(p, -((long) prevsize));
+ unlink(p, bck, fwd);
+ }
- set_head(p, size | PREV_INUSE);
- set_foot(p, size);
+ if (nextchunk != av->top) {
+ /* get and clear inuse bit */
+ nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
- check_free_chunk(av, p);
- }
+ /* consolidate forward */
+ if (!nextinuse) {
+ unlink(nextchunk, bck, fwd);
+ size += nextsize;
+ } else
+ clear_inuse_bit_at_offset(nextchunk, 0);
/*
- If the chunk borders the current high end of memory,
- consolidate into top
+ Place the chunk in unsorted chunk list. Chunks are
+ not placed into regular bins until after they have
+ been given one chance to be used in malloc.
*/
- else {
- size += nextsize;
- set_head(p, size | PREV_INUSE);
- av->top = p;
- check_chunk(av, p);
- }
+ bck = unsorted_chunks(av);
+ fwd = bck->fd;
+ p->bk = bck;
+ p->fd = fwd;
+ bck->fd = p;
+ fwd->bk = p;
- /*
- If freeing a large space, consolidate possibly-surrounding
- chunks. Then, if the total unused topmost memory exceeds trim
- threshold, ask malloc_trim to reduce top.
-
- Unless max_fast is 0, we don't know if there are fastbins
- bordering top, so we cannot tell for sure whether threshold
- has been reached unless fastbins are consolidated. But we
- don't want to consolidate on each free. As a compromise,
- consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
- is reached.
- */
+ set_head(p, size | PREV_INUSE);
+ set_foot(p, size);
+
+ check_free_chunk(av, p);
+ }
+
+ /*
+ If the chunk borders the current high end of memory,
+ consolidate into top
+ */
- if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
- if (have_fastchunks(av))
- malloc_consolidate(av);
+ else {
+ size += nextsize;
+ set_head(p, size | PREV_INUSE);
+ av->top = p;
+ check_chunk(av, p);
+ }
+
+ /*
+ If freeing a large space, consolidate possibly-surrounding
+ chunks. Then, if the total unused topmost memory exceeds trim
+ threshold, ask malloc_trim to reduce top.
+
+ Unless max_fast is 0, we don't know if there are fastbins
+ bordering top, so we cannot tell for sure whether threshold
+ has been reached unless fastbins are consolidated. But we
+ don't want to consolidate on each free. As a compromise,
+ consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
+ is reached.
+ */
- if (av == &main_arena) {
+ if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
+ if (have_fastchunks(av))
+ malloc_consolidate(av);
+
+ if (av == &main_arena) {
#ifndef MORECORE_CANNOT_TRIM
- if ((unsigned long)(chunksize(av->top)) >=
- (unsigned long)(mp_.trim_threshold))
- sYSTRIm(mp_.top_pad, av);
+ if ((unsigned long)(chunksize(av->top)) >=
+ (unsigned long)(mp_.trim_threshold))
+ sYSTRIm(mp_.top_pad, av);
#endif
- } else {
- /* Always try heap_trim(), even if the top chunk is not
- large, because the corresponding heap might go away. */
- heap_info *heap = heap_for_ptr(top(av));
+ } else {
+ /* Always try heap_trim(), even if the top chunk is not
+ large, because the corresponding heap might go away. */
+ heap_info *heap = heap_for_ptr(top(av));
- assert(heap->ar_ptr == av);
- heap_trim(heap, mp_.top_pad);
- }
+ assert(heap->ar_ptr == av);
+ heap_trim(heap, mp_.top_pad);
}
-
}
- /*
- If the chunk was allocated via mmap, release via munmap(). Note
- that if HAVE_MMAP is false but chunk_is_mmapped is true, then
- user must have overwritten memory. There's nothing we can do to
- catch this error unless MALLOC_DEBUG is set, in which case
- check_inuse_chunk (above) will have triggered error.
- */
- else {
+ }
+ /*
+ If the chunk was allocated via mmap, release via munmap(). Note
+ that if HAVE_MMAP is false but chunk_is_mmapped is true, then
+ user must have overwritten memory. There's nothing we can do to
+ catch this error unless MALLOC_DEBUG is set, in which case
+ check_inuse_chunk (above) will have triggered error.
+ */
+
+ else {
#if HAVE_MMAP
- int ret;
- INTERNAL_SIZE_T offset = p->prev_size;
- mp_.n_mmaps--;
- mp_.mmapped_mem -= (size + offset);
- ret = munmap((char*)p - offset, size + offset);
- /* munmap returns non-zero on failure */
- assert(ret == 0);
+ int ret;
+ INTERNAL_SIZE_T offset = p->prev_size;
+ mp_.n_mmaps--;
+ mp_.mmapped_mem -= (size + offset);
+ ret = munmap((char*)p - offset, size + offset);
+ /* munmap returns non-zero on failure */
+ assert(ret == 0);
#endif
- }
}
}
@@ -4528,7 +4525,8 @@ _int_realloc(mstate av, Void_t* oldmem, size_t bytes)
#if REALLOC_ZERO_BYTES_FREES
if (bytes == 0) {
- _int_free(av, oldmem);
+ if (oldmem != 0)
+ _int_free(av, oldmem);
return 0;
}
#endif
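The guard added above exists because of the _int_free change earlier in this
patch: with the NULL test removed, realloc(NULL, 0) would otherwise pass NULL
straight into _int_free. A compilable illustration of the control flow, using
stand-in names rather than the glibc internals:

#include <stdlib.h>

#define REALLOC_ZERO_BYTES_FREES 1     /* mirrors the malloc.c option */

static void my_internal_free (void *mem)
{
  /* Stands in for _int_free, which after this patch assumes
     mem != NULL.  */
  free (mem);
}

void *my_realloc (void *oldmem, size_t bytes)
{
#if REALLOC_ZERO_BYTES_FREES
  if (bytes == 0)
    {
      if (oldmem != NULL)              /* the guard this hunk adds */
        my_internal_free (oldmem);
      return NULL;
    }
#endif
  return realloc (oldmem, bytes);      /* normal path, delegated */
}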
diff --git a/sysdeps/posix/libc_fatal.c b/sysdeps/posix/libc_fatal.c
index fd97017579..fac8cbd051 100644
--- a/sysdeps/posix/libc_fatal.c
+++ b/sysdeps/posix/libc_fatal.c
@@ -129,6 +129,8 @@ __libc_message (int do_abort, const char *fmt, ...)
if (! written)
vsyslog (LOG_ERR, fmt, ap_copy);
+ va_end (ap_copy);
+
if (do_abort)
/* Kill the application. */
abort ();
diff --git a/sysdeps/unix/sysv/linux/libc_fatal.c b/sysdeps/unix/sysv/linux/libc_fatal.c
index 898be00604..c0482d96f3 100644
--- a/sysdeps/unix/sysv/linux/libc_fatal.c
+++ b/sysdeps/unix/sysv/linux/libc_fatal.c
@@ -141,6 +141,8 @@ __libc_message (int do_abort, const char *fmt, ...)
if (! written)
vsyslog (LOG_ERR, fmt, ap_copy);
+ va_end (ap_copy);
+
if (do_abort)
/* Terminate the process. */
abort ();
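Both libc_fatal.c hunks fix the same leak: ap_copy is created with va_copy but
was never released. C99 (7.15.1) requires each va_copy to be balanced by a
va_end in the same function before it returns. A minimal standalone example of
the pattern; log_twice is an illustrative name, not a glibc function:

#include <stdarg.h>
#include <stdio.h>

static void log_twice (const char *fmt, ...)
{
  va_list ap, ap_copy;
  va_start (ap, fmt);
  va_copy (ap_copy, ap);   /* a second traversal needs its own copy */

  vfprintf (stdout, fmt, ap);
  vfprintf (stderr, fmt, ap_copy);

  va_end (ap_copy);        /* the cleanup the patch adds */
  va_end (ap);
}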