author    ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4>    2014-05-09 05:01:08 +0000
committer ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4>    2014-05-09 05:01:08 +0000
commit    af436d54a40629a21e5aa3b6ca43d1b373e8c87a (patch)
tree      28483b89cea2278d730597de17a15c749c38766e /libbacktrace
parent    80ede13bcecf98757df8698a8d3b4cde1c60521f (diff)
download  gcc-af436d54a40629a21e5aa3b6ca43d1b373e8c87a.tar.gz
* mmap.c (backtrace_free): If freeing a large aligned block of
memory, call munmap rather than holding onto it.
(backtrace_vector_grow): When growing a vector, double the number
of pages requested. When releasing the old version of a grown
vector, pass the correct size to backtrace_free.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@210256 138bc75d-0d04-0410-961f-82ee72b054a4
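As a rough illustration of the first change described above, here is a minimal, standalone C sketch (not the libbacktrace code itself) of the test that decides whether a freed block can go straight back to the operating system: the block must be at least 16 pages' worth of memory, start on a page boundary, and span a whole number of pages. The helper name release_if_large, the main driver, and the hard-coded 16 * 4096 threshold are illustrative only.

    /* Minimal sketch of the "free large aligned blocks with munmap" idea.
       Not the libbacktrace code; names and threshold are illustrative.  */

    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static int
    release_if_large (void *addr, size_t size)
    {
      size_t pagesize = (size_t) getpagesize ();

      if (size >= 16 * 4096
          && ((uintptr_t) addr & (pagesize - 1)) == 0
          && (size & (pagesize - 1)) == 0)
        return munmap (addr, size) == 0;  /* 1: pages went back to the OS.  */
      return 0;  /* Caller keeps the block, e.g. on a free list.  */
    }

    int
    main (void)
    {
      size_t len = 32 * 4096;
      void *p = mmap (NULL, len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

      if (p == MAP_FAILED)
        return 1;
      return release_if_large (p, len) ? 0 : 1;
    }

As in the patch below, a failed munmap is not an error: the block simply stays with the allocator instead of being returned to the system.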
Diffstat (limited to 'libbacktrace')
-rw-r--r--  libbacktrace/ChangeLog    8
-rw-r--r--  libbacktrace/mmap.c      28
2 files changed, 34 insertions, 2 deletions
diff --git a/libbacktrace/ChangeLog b/libbacktrace/ChangeLog
index 8a1a8e74a32..adb54d8f5c3 100644
--- a/libbacktrace/ChangeLog
+++ b/libbacktrace/ChangeLog
@@ -1,3 +1,11 @@
+2014-05-08  Ian Lance Taylor  <iant@google.com>
+
+	* mmap.c (backtrace_free): If freeing a large aligned block of
+	memory, call munmap rather than holding onto it.
+	(backtrace_vector_grow): When growing a vector, double the number
+	of pages requested.  When releasing the old version of a grown
+	vector, pass the correct size to backtrace_free.
+
 2014-03-07  Ian Lance Taylor  <iant@google.com>
 
 	* sort.c (backtrace_qsort): Use middle element as pivot.
diff --git a/libbacktrace/mmap.c b/libbacktrace/mmap.c
index b530e382388..5a9f6299bc0 100644
--- a/libbacktrace/mmap.c
+++ b/libbacktrace/mmap.c
@@ -164,6 +164,26 @@ backtrace_free (struct backtrace_state *state, void *addr, size_t size,
 {
   int locked;
 
+  /* If we are freeing a large aligned block, just release it back to
+     the system.  This case arises when growing a vector for a large
+     binary with lots of debug info.  Calling munmap here may cause us
+     to call mmap again if there is also a large shared library; we
+     just live with that.  */
+  if (size >= 16 * 4096)
+    {
+      size_t pagesize;
+
+      pagesize = getpagesize ();
+      if (((uintptr_t) addr & (pagesize - 1)) == 0
+	  && (size & (pagesize - 1)) == 0)
+	{
+	  /* If munmap fails for some reason, just add the block to
+	     the freelist.  */
+	  if (munmap (addr, size) == 0)
+	    return;
+	}
+    }
+
   /* If we can acquire the lock, add the new space to the free list.
      If we can't acquire the lock, just leak the memory.
      __sync_lock_test_and_set returns the old state of the lock, so we
@@ -209,14 +229,18 @@ backtrace_vector_grow (struct backtrace_state *state,size_t size,
       alc = pagesize;
     }
   else
-    alc = (alc + pagesize - 1) & ~ (pagesize - 1);
+    {
+      alc *= 2;
+      alc = (alc + pagesize - 1) & ~ (pagesize - 1);
+    }
   base = backtrace_alloc (state, alc, error_callback, data);
   if (base == NULL)
     return NULL;
   if (vec->base != NULL)
     {
       memcpy (base, vec->base, vec->size);
-      backtrace_free (state, vec->base, vec->alc, error_callback, data);
+      backtrace_free (state, vec->base, vec->size + vec->alc,
+		      error_callback, data);
     }
   vec->base = base;
   vec->alc = alc - vec->size;
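For the growth change in the second mmap.c hunk, a short self-contained sketch may help show the effect of doubling alc before rounding it up to a page boundary: reaching an N-page vector now takes on the order of log2(N) reallocations rather than roughly one per page, which is what the old round-up-only computation tended toward under many small appends. The 4096-byte page size and the 1024-page target below are assumptions for the example, not values taken from libbacktrace.

    /* Sketch of the doubling growth policy; page size and target are assumed.  */
    #include <stdio.h>

    int
    main (void)
    {
      size_t pagesize = 4096;
      size_t alc = pagesize;	/* first allocation: one page */
      int grows = 0;

      while (alc < 1024 * pagesize)	/* grow until the vector spans 1024 pages */
        {
          alc *= 2;			/* the doubling added by this commit */
          alc = (alc + pagesize - 1) & ~ (pagesize - 1);
          grows++;
        }
      printf ("%d reallocations\n", grows);	/* prints 10, i.e. log2 (1024) */
      return 0;
    }

The same hunk also fixes the size passed when freeing the old copy of a grown vector: the region being released covers both the used bytes (vec->size) and the remaining capacity (vec->alc), so passing only vec->alc understated it.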