Commit 2a5195d9 authored by Ian Lance Taylor, committed by Ian Lance Taylor

re PR other/55536 (libbacktrace abort in backtrace_alloc at mmap.c:99 running btest)

	PR other/55536
	* mmap.c (backtrace_alloc): Don't call sync functions if not
	threaded.
	(backtrace_free): Likewise.

From-SVN: r194768
parent 459a58e1
libbacktrace/ChangeLog

+2013-01-01  Ian Lance Taylor  <iant@google.com>
+
+	PR other/55536
+	* mmap.c (backtrace_alloc): Don't call sync functions if not
+	threaded.
+	(backtrace_free): Likewise.
+
 2012-12-12  John David Anglin  <dave.anglin@nrc-cnrc.gc.ca>
 
 	* mmapio.c: Define MAP_FAILED if not defined.
@@ -26,6 +33,7 @@
 	PR other/55312
 	* configure.ac: Only add -Werror if building a target library.
+	* configure: Rebuild.
 
 2012-11-12  Ian Lance Taylor  <iant@google.com>
 	    Rainer Orth  <ro@CeBiTec.Uni-Bielefeld.DE>
libbacktrace/mmap.c

@@ -84,6 +84,7 @@ backtrace_alloc (struct backtrace_state *state,
 		 void *data)
 {
   void *ret;
+  int locked;
   struct backtrace_freelist_struct **pp;
   size_t pagesize;
   size_t asksize;
@@ -96,7 +97,12 @@ backtrace_alloc (struct backtrace_state *state,
      using mmap.  __sync_lock_test_and_set returns the old state of
      the lock, so we have acquired it if it returns 0.  */
 
-  if (!__sync_lock_test_and_set (&state->lock_alloc, 1))
+  if (!state->threaded)
+    locked = 1;
+  else
+    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
+
+  if (locked)
     {
       for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
 	{
@@ -120,7 +126,8 @@ backtrace_alloc (struct backtrace_state *state,
 	    }
 	}
 
-      __sync_lock_release (&state->lock_alloc);
+      if (state->threaded)
+	__sync_lock_release (&state->lock_alloc);
     }
 
   if (ret == NULL)
@@ -154,15 +161,24 @@ backtrace_free (struct backtrace_state *state, void *addr, size_t size,
 		backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
 		void *data ATTRIBUTE_UNUSED)
 {
+  int locked;
+
   /* If we can acquire the lock, add the new space to the free list.
     If we can't acquire the lock, just leak the memory.
     __sync_lock_test_and_set returns the old state of the lock, so we
     have acquired it if it returns 0.  */
-  if (!__sync_lock_test_and_set (&state->lock_alloc, 1))
+
+  if (!state->threaded)
+    locked = 1;
+  else
+    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
+
+  if (locked)
     {
       backtrace_free_locked (state, addr, size);
 
-      __sync_lock_release (&state->lock_alloc);
+      if (state->threaded)
+	__sync_lock_release (&state->lock_alloc);
     }
 }
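For reference, the conditional locking pattern the patch introduces can be read in isolation as the sketch below. This is a minimal illustration, not the real libbacktrace code: the type and helper names (sketch_state, sketch_try_lock, sketch_unlock) are hypothetical, and the actual patch open-codes the same checks inside backtrace_alloc and backtrace_free as shown in the diff above. The point is that when state->threaded is zero, neither __sync_lock_test_and_set nor __sync_lock_release is ever executed, so single-threaded callers never reach the sync builtins.

/* Minimal sketch (not the real libbacktrace code) of the conditional
   locking pattern used after this patch.  Names are hypothetical.  */

struct sketch_state
{
  int threaded;     /* Nonzero if the program may be multi-threaded.  */
  int lock_alloc;   /* Simple spin lock protecting the free list.  */
};

/* Try to take the allocation lock.  Returns nonzero if the caller now
   owns it.  In the unthreaded case no __sync builtin is executed.  */

static int
sketch_try_lock (struct sketch_state *state)
{
  if (!state->threaded)
    return 1;
  /* __sync_lock_test_and_set returns the previous value; 0 means the
     lock was free and we acquired it.  */
  return __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
}

/* Release the lock taken by sketch_try_lock.  A no-op when the state
   is not threaded, matching the acquire path above.  */

static void
sketch_unlock (struct sketch_state *state)
{
  if (state->threaded)
    __sync_lock_release (&state->lock_alloc);
}

With helpers like these, an allocator would call sketch_try_lock before touching the free list and sketch_unlock afterwards; the committed change inlines that same logic directly into both functions.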