Commit fe0f6df4 by Jason Merrill

re PR target/54908 (misc regressions on emutls targets remain from dynamic initialization of non-function-local TLS variables)

	PR target/54908
	* libsupc++/atexit_thread.cc: Rewrite to keep the cleanup list
	with get/setspecific.  Destroy the key on dlclose.

From-SVN: r192449
parent 85b5d65a
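
The change described above replaces the old per-thread thread_local "list" object (arrays of destructor slots plus a sentinel) with a singly-linked cleanup stack whose head is stored through the gthread TSD key, and it deletes the key from the destructor of a function-local static so the teardown also runs when the shared object is unloaded via dlclose. Below is a minimal standalone sketch of that pattern, not the libstdc++ code itself: it uses raw pthreads instead of the __gthread_* wrappers, omits the single-threaded fallback and the std::exit hook, and the name register_thread_cleanup is purely illustrative (the real entry point is __cxa_thread_atexit, shown in the diff further down).

    // Sketch: per-thread cleanup stack kept in POSIX thread-specific data.
    #include <pthread.h>
    #include <new>

    namespace {
      struct cleanup
      {
        void (*destructor)(void *);
        void *object;
        cleanup *next;
      };

      pthread_key_t key;

      // Key destructor: runs automatically when a thread exits while a
      // non-null value is stored for the key.
      void run_cleanups (void *p)
      {
        for (cleanup *c = static_cast<cleanup *>(p); c; c = c->next)
          c->destructor (c->object);
      }

      // Create the key the first time it is needed.  Deleting it from the
      // destructor of a function-local static (rather than from atexit)
      // means the delete also runs when this DSO is dlclose'd.
      void key_init ()
      {
        struct key_holder
        {
          key_holder () { pthread_key_create (&key, run_cleanups); }
          ~key_holder () { pthread_key_delete (key); }
        };
        static key_holder holder;
      }

      pthread_once_t once = PTHREAD_ONCE_INIT;
    }

    // Push one destructor onto the calling thread's cleanup stack.
    int register_thread_cleanup (void (*dtor)(void *), void *obj)
    {
      pthread_once (&once, key_init);
      cleanup *head = static_cast<cleanup *>(pthread_getspecific (key));
      cleanup *c = new (std::nothrow) cleanup;
      if (!c)
        return -1;
      c->destructor = dtor;
      c->object = obj;
      c->next = head;
      pthread_setspecific (key, c);
      return 0;
    }

Storing the stack head with setspecific means the key's destructor callback receives the whole stack when the thread exits, which is what lets this replace the old sentinel-object machinery.
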
2012-10-13 Jason Merrill <jason@redhat.com>
* g++.dg/tls/thread_local7g.C: Require tls_native.
2012-10-14 Jason Merrill <jason@redhat.com>
* g++.dg/cpp0x/inh-ctor1.C: New.
@@ -3,7 +3,7 @@
 // { dg-require-alias }
 // The reference temp should be TLS, not normal data.
-// { dg-final { scan-assembler-not "\\.data" } }
+// { dg-final { scan-assembler-not "\\.data" { target tls_native } } }
 thread_local int&& ir = 42;
2012-10-14 Jason Merrill <jason@redhat.com>
PR target/54908
* libsupc++/atexit_thread.cc: Rewrite to keep the cleanup list
with get/setspecific. Destroy the key on dlclose.
2012-10-12 Edward Smith-Rowland <3dw4rd@verizon.net>
* include/ext/random: Add __gnu_cxx::arcsine_distribution<>
libsupc++/atexit_thread.cc
@@ -27,109 +27,92 @@
 #include "bits/gthr.h"
 
 namespace {
-  // Data structure for the list of destructors: Singly-linked list
-  // of arrays.
-  class list
-  {
-    struct elt
-    {
-      void *object;
-      void (*destructor)(void *);
-    };
-
-    static const int max_nelts = 32;
-
-    list *next;
-    int nelts;
-    elt array[max_nelts];
-
-    elt *allocate_elt();
-  public:
-    void run();
-    static void run(void *p);
-    int add_elt(void (*)(void *), void *);
-  };
-
-  // Return the address of an open slot.
-  list::elt *
-  list::allocate_elt()
-  {
-    if (nelts < max_nelts)
-      return &array[nelts++];
-    if (!next)
-      next = new (std::nothrow) list();
-    if (!next)
-      return 0;
-    return next->allocate_elt();
-  }
-
-  // Run all the cleanups in the list.
-  void
-  list::run()
-  {
-    for (int i = nelts - 1; i >= 0; --i)
-      array[i].destructor (array[i].object);
-    if (next)
-      next->run();
-  }
-
-  // Static version to use as a callback to __gthread_key_create.
-  void
-  list::run(void *p)
-  {
-    static_cast<list *>(p)->run();
-  }
-
-  // The list of cleanups is per-thread.
-  thread_local list first;
-  // The pthread data structures for actually running the destructors at
-  // thread exit are shared.  The constructor of the thread-local sentinel
-  // object in add_elt performs the initialization.
-  __gthread_key_t key;
-  __gthread_once_t once = __GTHREAD_ONCE_INIT;
-
-  void run_current () { first.run(); }
-
-  void key_init() {
-    __gthread_key_create (&key, list::run);
-    // Also make sure the destructors are run by std::exit.
-    // FIXME TLS cleanups should run before static cleanups and atexit
-    // cleanups.
-    std::atexit (run_current);
-  }
-
-  struct sentinel
-  {
-    sentinel()
-    {
-      if (__gthread_active_p ())
-        {
-          __gthread_once (&once, key_init);
-          __gthread_setspecific (key, &first);
-        }
-      else
-        std::atexit (run_current);
-    }
-  };
-
-  // Actually insert an element.
-  int
-  list::add_elt(void (*dtor)(void *), void *obj)
-  {
-    thread_local sentinel s;
-    elt *e = allocate_elt ();
-    if (!e)
-      return -1;
-    e->object = obj;
-    e->destructor = dtor;
-    return 0;
-  }
-}
-
-namespace __cxxabiv1
-{
-  extern "C" int
-  __cxa_thread_atexit (void (*dtor)(void *), void *obj, void */*dso_handle*/)
-    _GLIBCXX_NOTHROW
-  {
-    return first.add_elt (dtor, obj);
-  }
-}
+  // One element in a singly-linked stack of cleanups.
+  struct elt
+  {
+    void (*destructor)(void *);
+    void *object;
+    elt *next;
+  };
+
+  // Keep a per-thread list of cleanups in gthread_key storage.
+  __gthread_key_t key;
+  // But also support non-threaded mode.
+  elt *single_thread;
+
+  // Run the specified stack of cleanups.
+  void run (void *p)
+  {
+    elt *e = static_cast<elt*>(p);
+    for (; e; e = e->next)
+      e->destructor (e->object);
+  }
+
+  // Run the stack of cleanups for the current thread.
+  void run ()
+  {
+    void *e;
+    if (__gthread_active_p ())
+      e = __gthread_getspecific (key);
+    else
+      e = single_thread;
+    run (e);
+  }
+
+  // Initialize the key for the cleanup stack.  We use a static local for
+  // key init/delete rather than atexit so that delete is run on dlclose.
+  void key_init() {
+    struct key_s {
+      key_s() { __gthread_key_create (&key, run); }
+      ~key_s() { __gthread_key_delete (key); }
+    };
+    static key_s ks;
+    // Also make sure the destructors are run by std::exit.
+    // FIXME TLS cleanups should run before static cleanups and atexit
+    // cleanups.
+    std::atexit (run);
+  }
+}
+
+extern "C" int
+__cxxabiv1::__cxa_thread_atexit (void (*dtor)(void *), void *obj, void */*dso_handle*/)
+  _GLIBCXX_NOTHROW
+{
+  // Do this initialization once.
+  if (__gthread_active_p ())
+    {
+      // When threads are active use __gthread_once.
+      static __gthread_once_t once = __GTHREAD_ONCE_INIT;
+      __gthread_once (&once, key_init);
+    }
+  else
+    {
+      // And when threads aren't active use a static local guard.
+      static bool queued;
+      if (!queued)
+        {
+          queued = true;
+          std::atexit (run);
+        }
+    }
+
+  elt *first;
+  if (__gthread_active_p ())
+    first = static_cast<elt*>(__gthread_getspecific (key));
+  else
+    first = single_thread;
+
+  elt *new_elt = new (std::nothrow) elt;
+  if (!new_elt)
+    return -1;
+  new_elt->destructor = dtor;
+  new_elt->object = obj;
+  new_elt->next = first;
+
+  if (__gthread_active_p ())
+    __gthread_setspecific (key, new_elt);
+  else
+    single_thread = new_elt;
+
+  return 0;
+}
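
For context on who calls this ABI entry point: when a C++11 thread_local object has a non-trivial destructor, the compiler-generated initialization code registers that destructor through __cxa_thread_atexit, so the cleanup stack built above is what runs it at thread exit. The small program below only illustrates that observable behavior with a standard GCC/libstdc++ toolchain; Logger is an invented example type, not part of this patch or the library.

    #include <cstdio>
    #include <thread>

    struct Logger
    {
      ~Logger () { std::puts ("thread-local Logger destroyed"); }
    };

    thread_local Logger tls_logger;

    int main ()
    {
      // Touching the thread_local in the worker constructs it there and
      // registers ~Logger via __cxa_thread_atexit; it runs when t exits.
      std::thread t ([] { std::printf ("worker logger at %p\n",
                                       (void *) &tls_logger); });
      t.join ();

      // Main's copy is destroyed on the std::exit path at program end.
      (void) &tls_logger;
      return 0;
    }
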