author    Ondřej Bílka <neleai@seznam.cz>    2013-10-30 16:24:38 +0100
committer Ondřej Bílka <neleai@seznam.cz>    2013-10-30 16:25:21 +0100
commit    c6e4925d4069d38843c02994ffd284e8c87c8929 (patch)
tree      5558a3ae83abb2b52818add3185bea2127cd7a39 /malloc
parent    bbea82f7fe8af40fd08e8956e1aaf4d877168652 (diff)
download  glibc-c6e4925d4069d38843c02994ffd284e8c87c8929.tar.gz
Use atomic operations to track memory. Fixes bug 11087
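The patch replaces plain ++/+= updates of the shared mp_.n_mmaps and mp_.mmapped_mem counters and their recorded peaks with glibc's internal atomic_exchange_and_add and atomic_max macros, so concurrent allocations can no longer lose statistics updates. Below is a minimal stand-alone sketch of the same pattern using C11 <stdatomic.h> in place of those internal macros; the names mmapped_mem, max_mmapped_mem, atomic_max_sketch and note_mmap are illustrative, not glibc's.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static _Atomic size_t mmapped_mem;      /* current mmap'd bytes */
static _Atomic size_t max_mmapped_mem;  /* high-water mark */

/* Peak tracking via a CAS loop, standing in for glibc's atomic_max:
   retry until we either store VALUE or observe a larger value.  */
static void
atomic_max_sketch (_Atomic size_t *mem, size_t value)
{
  size_t old = atomic_load (mem);
  while (old < value
         && !atomic_compare_exchange_weak (mem, &old, value))
    ;
}

/* Account for a new mmap'd block of SIZE bytes, as the sysmalloc hunk
   below does: fetch-and-add returns the old total, so old + SIZE is
   the new total to feed into the peak update.  */
static void
note_mmap (size_t size)
{
  size_t new_total = atomic_fetch_add (&mmapped_mem, size) + size;
  atomic_max_sketch (&max_mmapped_mem, new_total);
}

int
main (void)
{
  note_mmap (4096);
  note_mmap (8192);
  printf ("current=%zu peak=%zu\n",
          atomic_load (&mmapped_mem), atomic_load (&max_mmapped_mem));
  return 0;
}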
Diffstat (limited to 'malloc')
-rw-r--r--  malloc/malloc.c  23
1 file changed, 11 insertions, 12 deletions
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 1a18c3f5f2..79025b16d9 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -2253,7 +2253,6 @@ static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
mchunkptr remainder; /* remainder from allocation */
unsigned long remainder_size; /* its size */
- unsigned long sum; /* for updating stats */
size_t pagemask = GLRO(dl_pagesize) - 1;
bool tried_mmap = false;
@@ -2325,12 +2324,12 @@ static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
/* update statistics */
- if (++mp_.n_mmaps > mp_.max_n_mmaps)
- mp_.max_n_mmaps = mp_.n_mmaps;
+ int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
+ atomic_max (&mp_.max_n_mmaps, new);
- sum = mp_.mmapped_mem += size;
- if (sum > (unsigned long)(mp_.max_mmapped_mem))
- mp_.max_mmapped_mem = sum;
+ unsigned long sum;
+ sum = atomic_exchange_and_add(&mp_.mmapped_mem, size) + size;
+ atomic_max (&mp_.max_mmapped_mem, sum);
check_chunk(av, p);
@@ -2780,8 +2779,8 @@ munmap_chunk(mchunkptr p)
return;
}
- mp_.n_mmaps--;
- mp_.mmapped_mem -= total_size;
+ atomic_decrement (&mp_.n_mmaps);
+ atomic_add (&mp_.mmapped_mem, -total_size);
/* If munmap failed the process virtual memory address space is in a
bad shape. Just leave the block hanging around, the process will
@@ -2822,10 +2821,10 @@ mremap_chunk(mchunkptr p, size_t new_size)
assert((p->prev_size == offset));
set_head(p, (new_size - offset)|IS_MMAPPED);
- mp_.mmapped_mem -= size + offset;
- mp_.mmapped_mem += new_size;
- if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
- mp_.max_mmapped_mem = mp_.mmapped_mem;
+ INTERNAL_SIZE_T new;
+ new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
+ + new_size - size - offset;
+ atomic_max (&mp_.max_mmapped_mem, new);
return p;
}
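In the mremap_chunk hunk above, the old code subtracted the old footprint and added the new one in two separate non-atomic steps; the patch folds both into a single atomic_exchange_and_add of new_size - size - offset and then refreshes the peak from the returned old value. A self-contained sketch of that single-delta adjustment, again with C11 atomics standing in for glibc's internal macros (all names are illustrative only):

#include <stdatomic.h>
#include <stddef.h>

static _Atomic size_t mmapped_bytes;
static _Atomic size_t max_mmapped_bytes;

void
note_mremap (size_t old_size, size_t new_size, size_t offset)
{
  /* size_t arithmetic wraps modulo 2^N, so adding (new - old - offset)
     subtracts the old footprint and adds the new one in one step,
     even when the block shrinks.  */
  size_t delta = new_size - old_size - offset;
  size_t new_total = atomic_fetch_add (&mmapped_bytes, delta) + delta;

  /* CAS loop to raise the high-water mark if needed.  */
  size_t old = atomic_load (&max_mmapped_bytes);
  while (old < new_total
         && !atomic_compare_exchange_weak (&max_mmapped_bytes, &old,
                                           new_total))
    ;
}

Applying the whole adjustment as one delta keeps the counter from passing through a transient value that neither the old nor the new mapping size would justify.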