author	Carlos O'Donell <carlos@redhat.com>	2014-03-03 12:28:25 -0500
committer	Carlos O'Donell <carlos@redhat.com>	2014-03-03 12:30:17 -0500
commit	d6285c9f0b3972369356554727b1ede5a6eb0731 (patch)
tree	7b1a6f931a0f501b9afd0cffed7798db2b2a5b32 /malloc
parent	2b85d2a0b9a4aea4d431f6e6618c1f68c47d2a9f (diff)
Revert 4248f0da6ff9e7dd63464cdecec2dec332dfc2f0.
Objections were raised surrounding the calloc simplification, so it is better to revert the patch, continue the discussion, and then submit a new patch for inclusion with all issues fully addressed.
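
For orientation only, here is a minimal sketch (not glibc code) of the simplified calloc being reverted: it reduced __libc_calloc to an overflow check, a plain malloc, and an unconditional memset, which is what the removed lines in the diff below correspond to. The name simple_calloc is an illustrative assumption.

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative sketch only (not the glibc implementation): the
       simplified calloc that this commit reverts boiled down to an
       overflow check, a plain malloc, and an unconditional clear.  */
    static void *
    simple_calloc (size_t n, size_t elem_size)
    {
      /* size_t is unsigned, so the multiplication wraps on overflow;
         detect that by dividing back.  */
      size_t bytes = n * elem_size;
      if (elem_size != 0 && bytes / elem_size != n)
        {
          errno = ENOMEM;
          return NULL;
        }

      void *mem = malloc (bytes);
      if (mem == NULL)
        return NULL;

      /* Always clears, even when the memory is known to be zero already.  */
      return memset (mem, 0, bytes);
    }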
Diffstat (limited to 'malloc')
-rw-r--r--	malloc/malloc.c	115
1 file changed, 109 insertions(+), 6 deletions(-)
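
The restored code in the diff below avoids redundant clearing in two cases: mmapped chunks come back zero-filled from the kernel, and, when MORECORE_CLEARS is set, the part of the chunk that was just obtained by extending the heap is already zero, so only the older prefix may be dirty. It also unrolls the clear into explicit word stores when at most nine INTERNAL_SIZE_T words need zeroing. The following standalone sketch shows only the dirty-prefix idea; clear_dirty_prefix, chunk_bytes, and fresh_tail_bytes are hypothetical names, not glibc internals.

    #include <stddef.h>
    #include <string.h>

    /* Illustrative sketch only: bytes obtained from a brand-new heap
       extension arrive zero-filled, so only the older prefix of the
       chunk needs an explicit clear.  */
    static void *
    clear_dirty_prefix (void *mem, size_t chunk_bytes, size_t fresh_tail_bytes)
    {
      size_t dirty = chunk_bytes > fresh_tail_bytes
                     ? chunk_bytes - fresh_tail_bytes
                     : 0;

      if (dirty > 0)
        memset (mem, 0, dirty);   /* leave the already-zero tail untouched */

      return mem;
    }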
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 74ad92dbd0..9a45707ee7 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -3141,8 +3141,13 @@ __libc_pvalloc (size_t bytes)
void *
__libc_calloc (size_t n, size_t elem_size)
{
- INTERNAL_SIZE_T bytes;
+ mstate av;
+ mchunkptr oldtop, p;
+ INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
void *mem;
+ unsigned long clearsize;
+ unsigned long nclears;
+ INTERNAL_SIZE_T *d;
/* size_t is unsigned so the behavior on overflow is defined. */
bytes = n * elem_size;
@@ -3161,15 +3166,113 @@ __libc_calloc (size_t n, size_t elem_size)
atomic_forced_read (__malloc_hook);
if (__builtin_expect (hook != NULL, 0))
{
- mem = (*hook)(bytes, RETURN_ADDRESS (0));
+ sz = bytes;
+ mem = (*hook)(sz, RETURN_ADDRESS (0));
+ if (mem == 0)
+ return 0;
+
+ return memset (mem, 0, sz);
}
- else
- mem = __libc_malloc (bytes);
- if (mem == 0)
+ sz = bytes;
+
+ arena_get (av, sz);
+ if (!av)
return 0;
- return memset (mem, 0, bytes);
+ /* Check if we hand out the top chunk, in which case there may be no
+ need to clear. */
+#if MORECORE_CLEARS
+ oldtop = top (av);
+ oldtopsize = chunksize (top (av));
+# if MORECORE_CLEARS < 2
+ /* Only newly allocated memory is guaranteed to be cleared. */
+ if (av == &main_arena &&
+ oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
+ oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
+# endif
+ if (av != &main_arena)
+ {
+ heap_info *heap = heap_for_ptr (oldtop);
+ if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
+ oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
+ }
+#endif
+ mem = _int_malloc (av, sz);
+
+
+ assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
+ av == arena_for_chunk (mem2chunk (mem)));
+
+ if (mem == 0)
+ {
+ LIBC_PROBE (memory_calloc_retry, 1, sz);
+ av = arena_get_retry (av, sz);
+ if (__builtin_expect (av != NULL, 1))
+ {
+ mem = _int_malloc (av, sz);
+ (void) mutex_unlock (&av->mutex);
+ }
+ if (mem == 0)
+ return 0;
+ }
+ else
+ (void) mutex_unlock (&av->mutex);
+ p = mem2chunk (mem);
+
+ /* Two optional cases in which clearing not necessary */
+ if (chunk_is_mmapped (p))
+ {
+ if (__builtin_expect (perturb_byte, 0))
+ return memset (mem, 0, sz);
+
+ return mem;
+ }
+
+ csz = chunksize (p);
+
+#if MORECORE_CLEARS
+ if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
+ {
+ /* clear only the bytes from non-freshly-sbrked memory */
+ csz = oldtopsize;
+ }
+#endif
+
+ /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
+ contents have an odd number of INTERNAL_SIZE_T-sized words;
+ minimally 3. */
+ d = (INTERNAL_SIZE_T *) mem;
+ clearsize = csz - SIZE_SZ;
+ nclears = clearsize / sizeof (INTERNAL_SIZE_T);
+ assert (nclears >= 3);
+
+ if (nclears > 9)
+ return memset (d, 0, clearsize);
+
+ else
+ {
+ *(d + 0) = 0;
+ *(d + 1) = 0;
+ *(d + 2) = 0;
+ if (nclears > 4)
+ {
+ *(d + 3) = 0;
+ *(d + 4) = 0;
+ if (nclears > 6)
+ {
+ *(d + 5) = 0;
+ *(d + 6) = 0;
+ if (nclears > 8)
+ {
+ *(d + 7) = 0;
+ *(d + 8) = 0;
+ }
+ }
+ }
+ }
+
+ return mem;
}
/*