author     ak <ak@138bc75d-0d04-0410-961f-82ee72b054a4>  2011-10-29 01:01:54 +0000
committer  ak <ak@138bc75d-0d04-0410-961f-82ee72b054a4>  2011-10-29 01:01:54 +0000
commit     e8b7c6129e3b158a97c39a3213a58b40eb47f1b4 (patch)
tree       2071cd9a9389dac958320f1bd9ae2bb9c110cc2b
parent     76f0411d965454c0b994b5d5c0728771e04b3c35 (diff)
download   gcc-e8b7c6129e3b158a97c39a3213a58b40eb47f1b4.tar.gz
Free large chunks in ggc v2
This implements the freeing back of large chunks in the ggc madvise path that Richard Guenther asked for. This way, on systems with limited address space, malloc() and other allocators still have a chance to get back some of the memory ggc freed. The fragmented pages are still just given back to the OS, but the address space stays allocated.

I tried freeing only aligned 2MB areas to optimize for 2MB huge pages, but the hit rate was quite low, so I switched to 1MB+ unaligned areas.

v2: Hardcode the free unit size instead of using a param.

gcc/:
2011-10-18  Andi Kleen  <ak@linux.intel.com>

	* ggc-page (release_pages): First free large continuous
	chunks in the madvise path.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@180648 138bc75d-0d04-0410-961f-82ee72b054a4
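[Editor's note] To make the coalescing walk added to release_pages easier to follow outside the full file, here is a minimal, self-contained C sketch of the same list walk. The page_entry layout, PAGESIZE, FREE_UNIT, and the free_large_runs wrapper are simplified stand-ins rather than the real ggc-page.c definitions; the threshold assumes GGC_QUIRE_SIZE is 512 with 4K pages, which matches the roughly 1MB unit the commit message describes.

/* Simplified sketch of the run-coalescing pass; stand-in types, not
   the real ggc-page.c structures.  */

#include <stdlib.h>
#include <sys/mman.h>

struct page_entry
{
  struct page_entry *next;
  char *page;          /* Start of the mapping backing this entry.  */
  size_t bytes;        /* Size of the mapping.  */
};

/* Hypothetical stand-ins: 4K pages and a 1MB free unit, i.e.
   (GGC_QUIRE_SIZE/2) * pagesize assuming GGC_QUIRE_SIZE == 512.  */
#define PAGESIZE  4096
#define FREE_UNIT ((512 / 2) * PAGESIZE)

/* Walk *FREE_LIST, find runs of entries whose mappings are contiguous
   in address space, and munmap runs of at least FREE_UNIT bytes,
   unlinking their entries.  The list is only approximately sorted by
   address, so some runs will be missed.  Returns bytes unmapped.  */
static size_t
free_large_runs (struct page_entry **free_list)
{
  struct page_entry *p = *free_list, *prev = NULL;
  size_t unmapped = 0;

  while (p)
    {
      char *start = p->page;
      struct page_entry *start_p = p, *newprev = prev;
      size_t len = 0;

      /* Extend the run while the next entry starts exactly where the
	 previous one ended.  */
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  newprev = p;
	  p = p->next;
	}

      if (len >= FREE_UNIT)
	{
	  /* Drop the bookkeeping entries, then give the whole run back
	     to the OS, releasing the address space as well.  */
	  while (start_p != p)
	    {
	      struct page_entry *next = start_p->next;
	      free (start_p);
	      start_p = next;
	    }
	  munmap (start, len);
	  unmapped += len;
	  if (prev)
	    prev->next = p;
	  else
	    *free_list = p;
	  continue;
	}
      prev = newprev;
    }
  return unmapped;
}

Note that after unmapping a run the walk continues without advancing prev: the entry before the removed run is still the correct predecessor of p. This mirrors the continue statement in the patch below.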
-rw-r--r--  gcc/ChangeLog    5
-rw-r--r--  gcc/ggc-page.c  48
2 files changed, 53 insertions, 0 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 65df15b4005..3b2473463b5 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,10 @@
2011-10-18 Andi Kleen <ak@linux.intel.com>
+ * ggc-page (release_pages): First free large continuous
+ chunks in the madvise path.
+
+2011-10-18 Andi Kleen <ak@linux.intel.com>
+
* ggc-page.c (alloc_pages): Always round up entry_size.
2011-10-19 Andi Kleen <ak@linux.intel.com>
diff --git a/gcc/ggc-page.c b/gcc/ggc-page.c
index 077bc8e2bee..7bef4c02d34 100644
--- a/gcc/ggc-page.c
+++ b/gcc/ggc-page.c
@@ -973,6 +973,54 @@ release_pages (void)
page_entry *p, *start_p;
char *start;
size_t len;
+ size_t mapped_len;
+ page_entry *next, *prev, *newprev;
+ size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
+
+ /* First free larger continuous areas to the OS.
+ This allows other allocators to grab these areas if needed.
+ This is only done on larger chunks to avoid fragmentation.
+ This does not always work because the free_pages list is only
+ approximately sorted. */
+
+ p = G.free_pages;
+ prev = NULL;
+ while (p)
+ {
+ start = p->page;
+ start_p = p;
+ len = 0;
+ mapped_len = 0;
+ newprev = prev;
+ while (p && p->page == start + len)
+ {
+ len += p->bytes;
+ if (!p->discarded)
+ mapped_len += p->bytes;
+ newprev = p;
+ p = p->next;
+ }
+ if (len >= free_unit)
+ {
+ while (start_p != p)
+ {
+ next = start_p->next;
+ free (start_p);
+ start_p = next;
+ }
+ munmap (start, len);
+ if (prev)
+ prev->next = p;
+ else
+ G.free_pages = p;
+ G.bytes_mapped -= mapped_len;
+ continue;
+ }
+ prev = newprev;
+ }
+
+ /* Now give back the fragmented pages to the OS, but keep the address
+ space to reuse it next time. */
for (p = G.free_pages; p; )
{