author    Ulrich Drepper <drepper@redhat.com>  2009-07-16 09:54:34 -0700
committer Ulrich Drepper <drepper@redhat.com>  2009-07-16 09:54:34 -0700
commit    bec466d922ee22b94ac0d00415fb605e136efe6e (patch)
tree      52e72934969d628a3d301e84f1dc66aa9f2ec932 /malloc/malloc.c
parent    bea0ac1d8703091294fe5822d982591c849b5458 (diff)
download  glibc-bec466d922ee22b94ac0d00415fb605e136efe6e.tar.gz
Fix race in corruption check.
With atomic fastbins, the checks performed can race with concurrent modifications of the arena. If we detect a problem, redo the test after getting the lock.
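A minimal sketch of the pattern this commit applies (not glibc code; the names arena_lock, shared_limit, looks_corrupt, and check_with_retest are hypothetical stand-ins): an unlocked corruption check can misfire while another thread updates the state it reads, so a positive result is only trusted after it is reproduced under the lock.

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;
    static size_t shared_limit;   /* updated by other threads, like av->system_mem */

    /* Hypothetical predicate standing in for the next-chunk size sanity check.  */
    static int
    looks_corrupt (size_t next_size)
    {
      return next_size <= 2 * sizeof (size_t) || next_size >= shared_limit;
    }

    static void
    check_with_retest (size_t next_size, int have_lock)
    {
      if (looks_corrupt (next_size))
        {
          int really_bad = 1;
          if (!have_lock)
            {
              /* The unlocked read may have raced; re-test once the arena
                 is stable under the lock before reporting corruption.  */
              pthread_mutex_lock (&arena_lock);
              really_bad = looks_corrupt (next_size);
            }
          if (really_bad)
            abort ();   /* stands in for the errstr/errout path */
          /* False alarm: drop the lock we took and continue.  */
          if (!have_lock)
            pthread_mutex_unlock (&arena_lock);
        }
    }

Note the asymmetry, which the patch below shares: on a confirmed failure the lock is deliberately kept while the error is reported, and it is only released when the re-test shows the first check was a false positive.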
Diffstat (limited to 'malloc/malloc.c')
-rw-r--r--  malloc/malloc.c | 25
1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 0c0182ec0b..a459a2b89d 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4799,8 +4799,29 @@ _int_free(mstate av, mchunkptr p)
|| __builtin_expect (chunksize (chunk_at_offset (p, size))
>= av->system_mem, 0))
{
- errstr = "free(): invalid next size (fast)";
- goto errout;
+#ifdef ATOMIC_FASTBINS
+  /* We might not have a lock at this point and concurrent modifications
+     of system_mem might have led to a false positive.  Redo the test
+     after getting the lock.  */
+ if (have_lock
+ || ({ assert (locked == 0);
+ mutex_lock(&av->mutex);
+ locked = 1;
+ chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
+ || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
+ }))
+#endif
+ {
+ errstr = "free(): invalid next size (fast)";
+ goto errout;
+ }
+#ifdef ATOMIC_FASTBINS
+ if (! have_lock)
+ {
+ (void)mutex_unlock(&av->mutex);
+ locked = 0;
+ }
+#endif
}
if (__builtin_expect (perturb_byte, 0))
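The re-test is folded into the if condition with a GNU C statement expression, ({ ... }), whose value is that of its last expression; this lets the lock acquisition and the repeated check run only on the slow path. A short standalone illustration (hypothetical values, requires GCC or Clang, which support this extension):

    #include <stdio.h>

    int
    main (void)
    {
      int limit = 10;
      /* The whole ({ ... }) evaluates to its final expression, tmp > limit,
         just as the patch makes "lock, then re-check" yield the re-check's
         result directly inside the if condition.  */
      int bad = ({ int tmp = 3 * 4;   /* arbitrary work */
                   tmp > limit; });
      printf ("%d\n", bad);           /* prints 1, since 12 > 10 */
      return 0;
    }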