author     Jakub Jelinek <jakub@redhat.com>  2004-10-04 08:56:18 +0000
committer  Jakub Jelinek <jakub@redhat.com>  2004-10-04 08:56:18 +0000
commit     85148842d401edf64f9edee7e5819a947c289ed2 (patch)
tree       42f228e998070f60c3bdb2018c9921b221a6851b /malloc
parent     6d96590587deec027c04fe576f11cff0f445eb32 (diff)
download   glibc-85148842d401edf64f9edee7e5819a947c289ed2.tar.gz
Updated to fedora-glibc-20041004T0747cvs/fedora-glibc-2_3_3-64
Diffstat (limited to 'malloc')
-rw-r--r--  malloc/arena.c    62
-rw-r--r--  malloc/malloc.c   20
2 files changed, 65 insertions, 17 deletions
diff --git a/malloc/arena.c b/malloc/arena.c
index 3adfbc45f8..00f40971f3 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -550,6 +550,16 @@ dump_heap(heap) heap_info *heap;
#endif /* MALLOC_DEBUG > 1 */
+/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
+ addresses as opposed to increasing, new_heap would badly fragment the
+ address space. In that case remember the second HEAP_MAX_SIZE part
+ aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
+ call (if it is already aligned) and try to reuse it next time. We need
+ no locking for it, as kernel ensures the atomicity for us - worst case
+ we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
+ multiple threads, but only one will succeed. */
+static char *aligned_heap_area;
+
/* Create a new heap. size is automatically rounded up to a multiple
of the page size. */
@@ -580,21 +590,38 @@ new_heap(size, top_pad) size_t size, top_pad;
No swap space needs to be reserved for the following large
mapping (on Linux, this is the case for all non-writable mappings
anyway). */
- p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
- if(p1 != MAP_FAILED) {
- p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1)) & ~(HEAP_MAX_SIZE-1));
- ul = p2 - p1;
- munmap(p1, ul);
- munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
- } else {
- /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
- is already aligned. */
- p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
- if(p2 == MAP_FAILED)
- return 0;
- if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
+ p2 = MAP_FAILED;
+ if(aligned_heap_area) {
+ p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
+ MAP_PRIVATE|MAP_NORESERVE);
+ aligned_heap_area = NULL;
+ if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
munmap(p2, HEAP_MAX_SIZE);
- return 0;
+ p2 = MAP_FAILED;
+ }
+ }
+ if(p2 == MAP_FAILED) {
+ p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE,
+ MAP_PRIVATE|MAP_NORESERVE);
+ if(p1 != MAP_FAILED) {
+ p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
+ & ~(HEAP_MAX_SIZE-1));
+ ul = p2 - p1;
+ if (ul)
+ munmap(p1, ul);
+ else
+ aligned_heap_area = p2 + HEAP_MAX_SIZE;
+ munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
+ } else {
+ /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
+ is already aligned. */
+ p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
+ if(p2 == MAP_FAILED)
+ return 0;
+ if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
+ munmap(p2, HEAP_MAX_SIZE);
+ return 0;
+ }
}
}
if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
@@ -644,7 +671,12 @@ grow_heap(h, diff) heap_info *h; long diff;
/* Delete a heap. */
-#define delete_heap(heap) munmap((char*)(heap), HEAP_MAX_SIZE)
+#define delete_heap(heap) \
+ do { \
+ if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area) \
+ aligned_heap_area = NULL; \
+ munmap((char*)(heap), HEAP_MAX_SIZE); \
+ } while (0)
static int
internal_function
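
The new_heap() hunk above relies on the standard trick for obtaining a HEAP_MAX_SIZE-aligned mapping from an unaligned mmap, extended by the new aligned_heap_area hint. The following standalone sketch illustrates the same idea under simplifying assumptions: MAP_ANONYMOUS instead of glibc's MMAP macro, an arbitrary HEAP_MAX_SIZE value, and a hypothetical map_aligned_heap() wrapper that is not part of the patch.

/* Standalone sketch of the alignment trick in new_heap(); all names
   except mmap/munmap are illustrative, not glibc's.  */
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

#define HEAP_MAX_SIZE (1024 * 1024)   /* illustrative value only */

static char *aligned_heap_area;       /* hint left by the previous call */

static void *
map_aligned_heap (void)
{
  char *p1, *p2;
  unsigned long ul;

  /* First try the aligned address remembered from the last call.  */
  if (aligned_heap_area != NULL)
    {
      p2 = (char *) mmap (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED)
        {
          if (((uintptr_t) p2 & (HEAP_MAX_SIZE - 1)) == 0)
            return p2;                /* hint was still free and aligned */
          munmap (p2, HEAP_MAX_SIZE); /* kernel gave us something else */
        }
    }

  /* Map twice the required size so an aligned piece must fit inside.  */
  p1 = (char *) mmap (NULL, HEAP_MAX_SIZE << 1, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p1 == MAP_FAILED)
    return NULL;

  /* Round up to the next HEAP_MAX_SIZE boundary, then trim both ends.  */
  p2 = (char *) (((uintptr_t) p1 + (HEAP_MAX_SIZE - 1))
                 & ~(uintptr_t) (HEAP_MAX_SIZE - 1));
  ul = p2 - p1;
  if (ul != 0)
    munmap (p1, ul);                  /* drop the unaligned leading part */
  else
    /* p1 was already aligned: the second half is aligned as well, so
       remember its address as a hint before unmapping it below.  */
    aligned_heap_area = p2 + HEAP_MAX_SIZE;
  munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);

  return p2;
}
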
diff --git a/malloc/malloc.c b/malloc/malloc.c
index dcc54c46f1..a41171490a 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1709,7 +1709,7 @@ struct malloc_chunk {
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Size of previous chunk, if allocated | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of chunk, in bytes |P|
+ | Size of chunk, in bytes |M|P|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| User data starts here... .
. .
@@ -1771,7 +1771,7 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
MINSIZE bytes long, it is replenished.
2. Chunks allocated via mmap, which have the second-lowest-order
- bit (IS_MMAPPED) set in their size fields. Because they are
+ bit M (IS_MMAPPED) set in their size fields. Because they are
allocated one-by-one, each must contain its own trailing size field.
*/
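
The documentation hunk above records that the second-lowest bit of a chunk's size field is the M (IS_MMAPPED) flag, alongside the existing P (PREV_INUSE) bit. The short sketch below shows how such flag bits are packed into and masked out of a size word; the struct and helper are simplified stand-ins rather than the real malloc_chunk macros, although the 0x1/0x2 bit values match the ones malloc.c uses.

/* Simplified illustration of the M|P flag bits in the size field;
   not the actual glibc definitions.  Chunk sizes are always a multiple
   of at least 8, so the low bits are available for flags.  */
#include <stddef.h>
#include <stdio.h>

#define PREV_INUSE  0x1   /* P: previous chunk is in use        */
#define IS_MMAPPED  0x2   /* M: chunk was obtained through mmap */

struct chunk_header               /* stand-in for struct malloc_chunk */
{
  size_t prev_size;               /* size of previous chunk, if free  */
  size_t size;                    /* chunk size plus the M|P flags    */
};

/* Mask off the flag bits to recover the real chunk size.  */
static size_t
chunksize (const struct chunk_header *p)
{
  return p->size & ~(size_t) (PREV_INUSE | IS_MMAPPED);
}

int
main (void)
{
  struct chunk_header c = { 0, 4096 | IS_MMAPPED | PREV_INUSE };

  printf ("size=%zu mmapped=%d prev_inuse=%d\n",
          chunksize (&c),
          (c.size & IS_MMAPPED) != 0,
          (c.size & PREV_INUSE) != 0);
  return 0;
}
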
@@ -3530,6 +3530,13 @@ public_vALLOc(size_t bytes)
if(__malloc_initialized < 0)
ptmalloc_init ();
+
+ __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
+ __const __malloc_ptr_t)) =
+ __memalign_hook;
+ if (hook != NULL)
+ return (*hook)(mp_.pagesize, bytes, RETURN_ADDRESS (0));
+
arena_get(ar_ptr, bytes + mp_.pagesize + MINSIZE);
if(!ar_ptr)
return 0;
@@ -3546,6 +3553,15 @@ public_pVALLOc(size_t bytes)
if(__malloc_initialized < 0)
ptmalloc_init ();
+
+ __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
+ __const __malloc_ptr_t)) =
+ __memalign_hook;
+ if (hook != NULL)
+ return (*hook)(mp_.pagesize,
+ (bytes + mp_.pagesize - 1) & ~(mp_.pagesize - 1),
+ RETURN_ADDRESS (0));
+
arena_get(ar_ptr, bytes + 2*mp_.pagesize + MINSIZE);
p = _int_pvalloc(ar_ptr, bytes);
(void)mutex_unlock(&ar_ptr->mutex);
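
With this change both public_vALLOc and public_pVALLOc forward to a user-installed __memalign_hook, pvalloc rounding the request up to a whole number of pages first. The sketch below shows how a hook would observe that, using the old malloc hook API (deprecated and later removed from glibc); the logging hook and its save/restore pattern are illustrative, not part of the patch.

/* Usage sketch, assuming a glibc that still exposes __memalign_hook.  */
#include <malloc.h>
#include <stdio.h>
#include <unistd.h>

static void *(*old_memalign_hook) (size_t, size_t, const void *);

static void *
log_memalign_hook (size_t alignment, size_t bytes, const void *caller)
{
  void *result;

  /* Restore the previous hook so the memalign() call below is not
     intercepted again (which would recurse).  */
  __memalign_hook = old_memalign_hook;
  fprintf (stderr, "memalign hook: align=%zu bytes=%zu caller=%p\n",
           alignment, bytes, caller);
  result = memalign (alignment, bytes);
  /* Re-arm the hook for the next allocation.  */
  __memalign_hook = log_memalign_hook;
  return result;
}

int
main (void)
{
  size_t pagesize = (size_t) sysconf (_SC_PAGESIZE);

  old_memalign_hook = __memalign_hook;
  __memalign_hook = log_memalign_hook;

  /* After the patch, valloc (100) reaches the hook as (pagesize, 100)
     and pvalloc (100) as (pagesize, bytes rounded up to a page multiple),
     i.e. (bytes + pagesize - 1) & ~(pagesize - 1).  */
  void *a = valloc (100);
  void *b = pvalloc (100);

  printf ("valloc -> %p, pvalloc -> %p (page size %zu)\n", a, b, pagesize);
  return 0;
}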