author    ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-01-01 16:13:20 +0000
committer ian <ian@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-01-01 16:13:20 +0000
commit    a666b85bd54089c50d9c0577a05afbb6f2dc8b92 (patch)
tree      0ea7c16439b042c659c442ddc5377a98e78a3747 /libbacktrace
parent    edc63b2f7e52e996c1756fdda0d9f80fd46e928b (diff)
download  gcc-a666b85bd54089c50d9c0577a05afbb6f2dc8b92.tar.gz
PR other/55536
	* mmap.c (backtrace_alloc): Don't call sync functions if not
	threaded.
	(backtrace_free): Likewise.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@194768 138bc75d-0d04-0410-961f-82ee72b054a4
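Both functions in the patch apply the same pattern: take the lock with the GCC __sync builtins only when the state is threaded, and otherwise treat the lock as already held. Reduced to a minimal standalone sketch (struct demo_state, demo_try_lock and demo_unlock are hypothetical names; only the __sync builtins match the real code):

/* Conditional-locking pattern from this patch, as a sketch.  */

struct demo_state
{
  int threaded;	/* Nonzero if multiple threads may use this state.  */
  int lock;	/* Spin-lock word, meaningful only if threaded.  */
};

static int
demo_try_lock (struct demo_state *s)
{
  /* Single-threaded: no synchronization needed, behave as locked.  */
  if (!s->threaded)
    return 1;
  /* __sync_lock_test_and_set returns the old value, so 0 means the
     lock was free and we now hold it.  */
  return __sync_lock_test_and_set (&s->lock, 1) == 0;
}

static void
demo_unlock (struct demo_state *s)
{
  if (s->threaded)
    __sync_lock_release (&s->lock);
}

Skipping the builtins entirely in the single-threaded case presumably also spares such programs any dependency on the __sync support routines, which on some targets are supplied by libgcc or a threading library.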
Diffstat (limited to 'libbacktrace')
-rw-r--r--  libbacktrace/ChangeLog  |  8
-rw-r--r--  libbacktrace/mmap.c     | 24
2 files changed, 28 insertions(+), 4 deletions(-)
diff --git a/libbacktrace/ChangeLog b/libbacktrace/ChangeLog
index 9cc767b936b..39802c31109 100644
--- a/libbacktrace/ChangeLog
+++ b/libbacktrace/ChangeLog
@@ -1,3 +1,10 @@
+2013-01-01  Ian Lance Taylor  <iant@google.com>
+
+	PR other/55536
+	* mmap.c (backtrace_alloc): Don't call sync functions if not
+	threaded.
+	(backtrace_free): Likewise.
+
 2012-12-12  John David Anglin  <dave.anglin@nrc-cnrc.gc.ca>
 
 	* mmapio.c: Define MAP_FAILED if not defined.
@@ -26,6 +33,7 @@
 
 	PR other/55312
 	* configure.ac: Only add -Werror if building a target library.
+	* configure: Rebuild.
 
 2012-11-12  Ian Lance Taylor  <iant@google.com>
 	    Rainer Orth  <ro@CeBiTec.Uni-Bielefeld.DE>
diff --git a/libbacktrace/mmap.c b/libbacktrace/mmap.c
index d3313c7cf1e..a6c730ecd23 100644
--- a/libbacktrace/mmap.c
+++ b/libbacktrace/mmap.c
@@ -84,6 +84,7 @@ backtrace_alloc (struct backtrace_state *state,
 		 void *data)
 {
   void *ret;
+  int locked;
   struct backtrace_freelist_struct **pp;
   size_t pagesize;
   size_t asksize;
@@ -96,7 +97,12 @@ backtrace_alloc (struct backtrace_state *state,
      using mmap.  __sync_lock_test_and_set returns the old state of
      the lock, so we have acquired it if it returns 0.  */
 
-  if (!__sync_lock_test_and_set (&state->lock_alloc, 1))
+  if (!state->threaded)
+    locked = 1;
+  else
+    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
+
+  if (locked)
     {
       for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
 	{
@@ -120,7 +126,8 @@ backtrace_alloc (struct backtrace_state *state,
 	    }
 	}
 
-      __sync_lock_release (&state->lock_alloc);
+      if (state->threaded)
+	__sync_lock_release (&state->lock_alloc);
     }
 
   if (ret == NULL)
@@ -154,15 +161,24 @@ backtrace_free (struct backtrace_state *state, void *addr, size_t size,
 		backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
 		void *data ATTRIBUTE_UNUSED)
 {
+  int locked;
+
   /* If we can acquire the lock, add the new space to the free list.
      If we can't acquire the lock, just leak the memory.
      __sync_lock_test_and_set returns the old state of the lock, so we
      have acquired it if it returns 0.  */
-  if (!__sync_lock_test_and_set (&state->lock_alloc, 1))
+
+  if (!state->threaded)
+    locked = 1;
+  else
+    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
+
+  if (locked)
     {
       backtrace_free_locked (state, addr, size);
-      __sync_lock_release (&state->lock_alloc);
+      if (state->threaded)
+	__sync_lock_release (&state->lock_alloc);
     }
 }
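For context, the threaded flag tested throughout this patch is set from libbacktrace's public entry point, backtrace_create_state, whose second argument declares whether the returned state may be shared between threads. A minimal single-threaded caller might look like the following sketch (error handling kept to a stub):

#include <backtrace.h>
#include <stdio.h>

static void
error_cb (void *data, const char *msg, int errnum)
{
  /* A real caller would record or report the failure.  */
  fprintf (stderr, "libbacktrace: %s (%d)\n", msg, errnum);
}

int
main (void)
{
  /* threaded == 0: with this patch, backtrace_alloc and
     backtrace_free in mmap.c never touch the __sync builtins.  */
  struct backtrace_state *state
    = backtrace_create_state (NULL, 0, error_cb, NULL);
  (void) state;
  return 0;
}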