author     Maxim Kuvyrkov <maxim@kugelworks.com>    2013-12-24 09:44:50 +1300
committer  Maxim Kuvyrkov <maxim@kugelworks.com>    2013-12-24 09:44:50 +1300
commit     362b47fe09ca9a928d444c7e2f7992f7f61bfc3e (patch)
tree       6279f6a26cf21a076aeee89081d4cc350ed8dc74
parent     b9bcbbcbe7afa94442d335811d4a1c1e0c0a1daf (diff)
Fix race in free() of fastbin chunk: BZ #15073
Perform the sanity check only if we have_lock.  Due to the lockless nature of
fastbins we need to be careful when dereferencing pointers to fastbin entries
(chunksize(old) in this case) in multithreaded environments.

The fix is to add have_lock to the if-condition checks.  The rest of the patch
only makes the code more readable.
* malloc/malloc.c (_int_free): Perform sanity check only if we
have_lock.
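
To make the race concrete, here is a minimal standalone sketch of the lockless
fastbin push and of the guarded size check the patch introduces.  It is an
illustration only, not glibc code: the chunk struct, fastbin_free, fastbin_head
and the abort-based error handling are simplified assumptions, and C11 atomics
stand in for glibc's catomic macros.

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdlib.h>

    /* Hypothetical, simplified chunk layout; glibc's real malloc_chunk differs.  */
    typedef struct chunk
    {
      size_t size;                /* stand-in for chunksize (p) */
      struct chunk *fd;           /* next chunk in the fastbin list */
    } chunk;

    /* Shared fastbin head, pushed to locklessly with compare-and-swap.  */
    static _Atomic (chunk *) fastbin_head;

    static void
    fastbin_free (chunk *p, int have_lock)
    {
      chunk *old = atomic_load (&fastbin_head);
      size_t old_size = 0;

      do
        {
          /* Double free: the chunk being freed is already at the top.  */
          if (old == p)
            abort ();

          /* The crux of the fix: without the arena lock OLD may already have
             been popped and handed to another thread, so reading old->size
             here would touch memory we no longer own.  Only do it locked.  */
          if (have_lock && old != NULL)
            old_size = old->size;

          p->fd = old;            /* link P on top of the current head */
        }
      /* On failure OLD is reloaded with the observed head and we retry.  */
      while (!atomic_compare_exchange_weak (&fastbin_head, &old, p));

      /* Deferred size-consistency check, meaningful only under the lock.  */
      if (have_lock && old != NULL && old_size != p->size)
        abort ();
    }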
-rw-r--r--  ChangeLog          7
-rw-r--r--  NEWS              22
-rw-r--r--  malloc/malloc.c   20
3 files changed, 30 insertions, 19 deletions
diff --git a/ChangeLog b/ChangeLog
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2013-12-24  Maxim Kuvyrkov  <maxim@kugelworks.com>
+            Ondřej Bílka  <neleai@seznam.cz>
+
+        [BZ #15073]
+        * malloc/malloc.c (_int_free): Perform sanity check only if we
+        have_lock.
+
 2013-12-23  Ondřej Bílka  <neleai@seznam.cz>
 
         [BZ #12986]
diff --git a/NEWS b/NEWS
--- a/NEWS
+++ b/NEWS
@@ -12,17 +12,17 @@ Version 2.19
   156, 387, 431, 832, 926, 2801, 4772, 6786, 6787, 6807, 6810, 7003, 9954,
   10253, 10278, 11087, 11157, 11214, 12100, 12486, 12986, 13028, 13982, 13985,
   14029, 14032, 14120, 14143, 14155, 14547, 14699, 14752, 14876,
-  14910, 15004, 15048, 15089, 15128, 15218, 15268, 15277, 15308, 15362,
-  15374, 15400, 15425, 15427, 15483, 15522, 15531, 15532, 15593, 15601,
-  15608, 15609, 15610, 15632, 15640, 15670, 15672, 15680, 15681, 15723,
-  15734, 15735, 15736, 15748, 15749, 15754, 15760, 15763, 15764, 15797,
-  15799, 15825, 15843, 15844, 15846, 15847, 15849, 15855, 15856, 15857,
-  15859, 15867, 15886, 15887, 15890, 15892, 15893, 15895, 15897, 15901,
-  15905, 15909, 15915, 15917, 15919, 15921, 15923, 15939, 15941, 15948,
-  15963, 15966, 15985, 15988, 15997, 16032, 16034, 16036, 16037, 16038,
-  16041, 16055, 16071, 16072, 16074, 16077, 16078, 16103, 16112, 16143,
-  16144, 16146, 16150, 16151, 16153, 16167, 16172, 16195, 16214, 16245,
-  16356.
+  14910, 15004, 15048, 15073, 15089, 15128, 15218, 15268, 15277, 15308,
+  15362, 15374, 15400, 15425, 15427, 15483, 15522, 15531, 15532, 15593,
+  15601, 15608, 15609, 15610, 15632, 15640, 15670, 15672, 15680, 15681,
+  15723, 15734, 15735, 15736, 15748, 15749, 15754, 15760, 15763, 15764,
+  15797, 15799, 15825, 15843, 15844, 15846, 15847, 15849, 15855, 15856,
+  15857, 15859, 15867, 15886, 15887, 15890, 15892, 15893, 15895, 15897,
+  15901, 15905, 15909, 15915, 15917, 15919, 15921, 15923, 15939, 15941,
+  15948, 15963, 15966, 15985, 15988, 15997, 16032, 16034, 16036, 16037,
+  16038, 16041, 16055, 16071, 16072, 16074, 16077, 16078, 16103, 16112,
+  16143, 16144, 16146, 16150, 16151, 16153, 16167, 16172, 16195, 16214,
+  16245, 16356.
 
 * The public headers no longer use __unused nor __block.  This change is
   to support compiling programs that are derived from BSD sources and use
diff --git a/malloc/malloc.c b/malloc/malloc.c
index b1668b501b..5e419adac4 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -3783,25 +3783,29 @@ _int_free(mstate av, mchunkptr p, int have_lock)
     unsigned int idx = fastbin_index(size);
     fb = &fastbin (av, idx);
 
-    mchunkptr fd;
-    mchunkptr old = *fb;
+    /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
+    mchunkptr old = *fb, old2;
     unsigned int old_idx = ~0u;
     do
       {
-        /* Another simple check: make sure the top of the bin is not the
-           record we are going to add (i.e., double free).  */
+        /* Check that the top of the bin is not the record we are going to add
+           (i.e., double free).  */
         if (__builtin_expect (old == p, 0))
           {
             errstr = "double free or corruption (fasttop)";
             goto errout;
           }
-        if (old != NULL)
+        /* Check that size of fastbin chunk at the top is the same as
+           size of the chunk that we are adding.  We can dereference OLD
+           only if we have the lock, otherwise it might have already been
+           deallocated.  See use of OLD_IDX below for the actual check.  */
+        if (have_lock && old != NULL)
           old_idx = fastbin_index(chunksize(old));
-        p->fd = fd = old;
+        p->fd = old2 = old;
       }
-    while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
+    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
 
-    if (fd != NULL && __builtin_expect (old_idx != idx, 0))
+    if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
       {
         errstr = "invalid fastbin entry (free)";
         goto errout;
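
For readers unfamiliar with glibc's atomic macros, the loop test above hinges on
catomic_compare_and_exchange_val_rel returning the value *fb held before the
operation, so the push succeeded exactly when the returned value equals old2.
A rough C11-style model of that behaviour (an assumption for illustration, not
glibc's actual implementation) is:

    #include <stdatomic.h>

    /* Model: store NEWVAL into *MEM if *MEM equals OLDVAL, with release
       ordering, and return the value *MEM held before the operation.  The
       store happened exactly when the return value equals OLDVAL, hence the
       `!= old2' loop test in the patch.  */
    static inline void *
    cas_val_rel_model (void *_Atomic *mem, void *newval, void *oldval)
    {
      void *expected = oldval;
      atomic_compare_exchange_strong_explicit (mem, &expected, newval,
                                               memory_order_release,
                                               memory_order_relaxed);
      return expected;   /* == oldval on success, current value otherwise */
    }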