aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMaxim Kuvyrkov <maxim@kugelworks.com>2013-12-24 09:44:50 +1300
committerMaxim Kuvyrkov <maxim@kugelworks.com>2014-01-05 14:58:57 +1300
commit3db0119ef56decc402827d5b84774fa4487f6cd4 (patch)
tree3b6095bfdf6353360a8f4f7a1be02d6b2d78f8ed
parent15256e58adc62d8d62c03b2b3ba526b468758702 (diff)
downloadglibc-3db0119ef56decc402827d5b84774fa4487f6cd4.tar
glibc-3db0119ef56decc402827d5b84774fa4487f6cd4.tar.gz
glibc-3db0119ef56decc402827d5b84774fa4487f6cd4.tar.bz2
glibc-3db0119ef56decc402827d5b84774fa4487f6cd4.zip
Fix race in free() of fastbin chunk: BZ #15073
Perform sanity check only if we have_lock. Due to the lockless nature of fastbins we need to be careful when dereferencing pointers to fastbin entries (chunksize(old) in this case) in multithreaded environments. The fix is to add have_lock to the if-condition checks. The rest of the patch only makes code more readable. * malloc/malloc.c (_int_free): Perform sanity check only if we have_lock. Conflicts: ChangeLog NEWS
-rw-r--r--ChangeLog7
-rw-r--r--NEWS2
-rw-r--r--malloc/malloc.c20
3 files changed, 20 insertions, 9 deletions
diff --git a/ChangeLog b/ChangeLog
index b76e7dc06e..49bf8f9001 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2014-01-04 Maxim Kuvyrkov <maxim@kugelworks.com>
+ Ondřej Bílka <neleai@seznam.cz>
+
+ [BZ #15073]
+ * malloc/malloc.c (_int_free): Perform sanity check only if we
+ have_lock.
+
2013-02-08 Carlos O'Donell <carlos@redhat.com>
[BZ #15006]
diff --git a/NEWS b/NEWS
index 10c1ca62ac..c9cf487624 100644
--- a/NEWS
+++ b/NEWS
@@ -8,7 +8,7 @@ using `glibc' in the "product" field.
Version 2.17.1
* The following bugs are resolved with this release:
- 15003, 15006, 15122, 15759.
+ 15003, 15006, 15073, 15122, 15759.
Version 2.17
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 3f0b6b1cd4..acf3d4202f 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -3816,25 +3816,29 @@ _int_free(mstate av, mchunkptr p, int have_lock)
unsigned int idx = fastbin_index(size);
fb = &fastbin (av, idx);
- mchunkptr fd;
- mchunkptr old = *fb;
+ /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
+ mchunkptr old = *fb, old2;
unsigned int old_idx = ~0u;
do
{
- /* Another simple check: make sure the top of the bin is not the
- record we are going to add (i.e., double free). */
+ /* Check that the top of the bin is not the record we are going to add
+ (i.e., double free). */
if (__builtin_expect (old == p, 0))
{
errstr = "double free or corruption (fasttop)";
goto errout;
}
- if (old != NULL)
+ /* Check that size of fastbin chunk at the top is the same as
+ size of the chunk that we are adding. We can dereference OLD
+ only if we have the lock, otherwise it might have already been
+ deallocated. See use of OLD_IDX below for the actual check. */
+ if (have_lock && old != NULL)
old_idx = fastbin_index(chunksize(old));
- p->fd = fd = old;
+ p->fd = old2 = old;
}
- while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
+ while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
- if (fd != NULL && __builtin_expect (old_idx != idx, 0))
+ if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
{
errstr = "invalid fastbin entry (free)";
goto errout;