Commit 40aac948 by Jason Merrill Committed by Jason Merrill

re PR c++/13684 (local static object variable constructed once but ctors and…

re PR c++/13684 (local static object variable constructed once but ctors and dtors called multiple times on same memory when called in multiple threads)

        PR c++/13684
        * cp/decl.c (expand_static_init): Use thread-safety API.
        (register_dtor_fn): Return the call, don't expand it.
        * cp/tree.c (add_stmt_to_compound): New fn.
        (stabilize_call): Use it.
        * gimplify.c (gimplify_cleanup_point_expr): Handle CLEANUP_EH_ONLY.
        (gimple_push_cleanup): Add eh_only parm.
        (gimplify_target_expr): Pass it.
        * c.opt (-fno-threadsafe-statics): New option.
        * c-opts.c (c_common_handle_option): Handle it.
        * c-common.h (flag_threadsafe_statics): Declare it.
        * c-common.c (flag_threadsafe_statics): Record it.
        * doc/invoke.texi: Document it.
        * tsystem.h (_GNU_SOURCE): Define.
        * gthr-posix.h (__gthread_recursive_mutex_t): New typedef.
        (__GTHREAD_RECURSIVE_MUTEX_INIT): New macro.
        (__GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION): New macro.
        (__gthread_recursive_mutex_init_function): New fn.
        (__gthread_recursive_mutex_lock): New fn.
        (__gthread_recursive_mutex_trylock): New fn.
        (__gthread_recursive_mutex_unlock): New fn.
        * gthr-solaris.h, gthr-single.h, gthr-dce.h: Likewise.
        * gthr-win32.h, gthr-vxworks.h: Likewise.
        * gthr.h: Document.

        * libsupc++/guard.cc (static_mutex): Internal class implementing a
        recursive mutex which controls initialization of local statics.
        (__gnu_cxx::recursive_init): New exception class.
        (__cxa_guard_acquire): Deal with locking and recursion detection.
        (acquire_1, __cxa_guard_abort, __cxa_guard_release): Likewise.

From-SVN: r86687
parent ed347998
2004-08-27 Jason Merrill <jason@redhat.com>
PR c++/13684
* gimplify.c (gimplify_cleanup_point_expr): Handle CLEANUP_EH_ONLY.
(gimple_push_cleanup): Add eh_only parm.
(gimplify_target_expr): Pass it.
* c.opt (-fno-threadsafe-statics): New option.
* c-opts.c (c_common_handle_option): Handle it.
* c-common.h (flag_threadsafe_statics): Declare it.
* c-common.c (flag_threadsafe_statics): Record it.
* doc/invoke.texi: Document it.
* tsystem.h (_GNU_SOURCE): Define.
* gthr-posix.h (__gthread_recursive_mutex_t): New typedef.
(__GTHREAD_RECURSIVE_MUTEX_INIT): New macro.
(__GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION): New macro.
(__gthread_recursive_mutex_init_function): New fn.
(__gthread_recursive_mutex_lock): New fn.
(__gthread_recursive_mutex_trylock): New fn.
(__gthread_recursive_mutex_unlock): New fn.
* gthr-solaris.h, gthr-single.h, gthr-dce.h: Likewise.
* gthr-win32.h, gthr-vxworks.h: Likewise.
* gthr.h: Document.
2004-08-27 David Edelsohn <edelsohn@gnu.org> 2004-08-27 David Edelsohn <edelsohn@gnu.org>
* config/rs6000/rs6000.c (rs6000_override_options): Increase * config/rs6000/rs6000.c (rs6000_override_options): Increase
......
...@@ -459,6 +459,11 @@ int flag_permissive; ...@@ -459,6 +459,11 @@ int flag_permissive;
int flag_enforce_eh_specs = 1; int flag_enforce_eh_specs = 1;
/* Nonzero means to generate thread-safe code for initializing local
statics. */
int flag_threadsafe_statics = 1;
/* Nonzero means warn about implicit declarations. */ /* Nonzero means warn about implicit declarations. */
int warn_implicit = 1; int warn_implicit = 1;
......
...@@ -571,6 +571,11 @@ extern int flag_permissive; ...@@ -571,6 +571,11 @@ extern int flag_permissive;
extern int flag_enforce_eh_specs; extern int flag_enforce_eh_specs;
/* Nonzero (the default) means to generate thread-safe code for
initializing local statics. */
extern int flag_threadsafe_statics;
/* Nonzero means warn about implicit declarations. */ /* Nonzero means warn about implicit declarations. */
extern int warn_implicit; extern int warn_implicit;
......
...@@ -773,6 +773,10 @@ c_common_handle_option (size_t scode, const char *arg, int value) ...@@ -773,6 +773,10 @@ c_common_handle_option (size_t scode, const char *arg, int value)
flag_weak = value; flag_weak = value;
break; break;
case OPT_fthreadsafe_statics:
flag_threadsafe_statics = value;
break;
case OPT_fzero_link: case OPT_fzero_link:
flag_zero_link = value; flag_zero_link = value;
break; break;
......
...@@ -675,6 +675,10 @@ C++ ObjC++ Joined RejectNegative UInteger ...@@ -675,6 +675,10 @@ C++ ObjC++ Joined RejectNegative UInteger
fthis-is-variable fthis-is-variable
C++ ObjC++ C++ ObjC++
fthreadsafe-statics
C++ ObjC++
-fno-threadsafe-statics Do not generate thread-safe code for initializing local statics.
funsigned-bitfields funsigned-bitfields
C ObjC C++ ObjC++ C ObjC C++ ObjC++
When \"signed\" or \"unsigned\" is not given make the bitfield unsigned When \"signed\" or \"unsigned\" is not given make the bitfield unsigned
......
...@@ -182,3 +182,73 @@ __gthr_win32_mutex_unlock (__gthread_mutex_t *mutex) ...@@ -182,3 +182,73 @@ __gthr_win32_mutex_unlock (__gthread_mutex_t *mutex)
else else
return 0; return 0;
} }
/* One-time initialization of recursive MUTEX: counter == -1 means
   "free", no owner, zero recursion depth.  The semaphore is used to
   park contending threads (up to 65535 waiters).  */
void
__gthr_win32_recursive_mutex_init_function (__gthread_recursive_mutex_t *mutex)
{
mutex->counter = -1;
mutex->depth = 0;
mutex->owner = 0;
mutex->sema = CreateSemaphore (NULL, 0, 65535, NULL);
}
/* Acquire MUTEX, allowing recursive acquisition by the owning thread.
   Returns 0 on success, 1 on failure.

   counter is -1 while the mutex is free; the increment that takes it
   to 0 grants ownership without touching the semaphore.  Later
   contenders block on the semaphore, except the current owner, which
   only bumps the recursion depth.  */
int
__gthr_win32_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
{
  DWORD me = GetCurrentThreadId();

  if (InterlockedIncrement (&mutex->counter) == 0)
    {
      /* Mutex was free: we are the first locker.  */
      mutex->depth = 1;
      mutex->owner = me;
    }
  else if (mutex->owner == me)
    {
      /* Recursive acquisition: we already hold the lock, so undo the
	 increment above (we must not count as a new waiter) and just
	 track the nesting depth.  The original code referenced the
	 nonexistent `mx->lock_idx' here, which does not compile.  */
      InterlockedDecrement (&mutex->counter);
      ++(mutex->depth);
    }
  else if (WaitForSingleObject (mutex->sema, INFINITE) == WAIT_OBJECT_0)
    {
      /* A previous owner released the mutex and woke us.  */
      mutex->depth = 1;
      mutex->owner = me;
    }
  else
    {
      /* WaitForSingleObject returns WAIT_FAILED, and we can only do
	 some best-effort cleanup here.  */
      InterlockedDecrement (&mutex->counter);
      return 1;
    }
  return 0;
}
/* Try to acquire MUTEX without blocking.  Returns 0 if the lock was
   obtained (or is already held by this thread, in which case only the
   recursion depth is bumped), 1 if another thread holds it.  */
int
__gthr_win32_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
{
DWORD me = GetCurrentThreadId();
/* Atomically claim the mutex only if it is free (counter == -1).  */
if (__GTHR_W32_InterlockedCompareExchange (&mutex->counter, 0, -1) < 0)
{
mutex->depth = 1;
mutex->owner = me;
}
else if (mutex->owner == me)
++(mutex->depth);
else
return 1;
return 0;
}
/* Release one recursion level of MUTEX.  Ownership is dropped only
   when the outermost level is released (depth reaches 0); if other
   threads are still waiting (counter >= 0 after the decrement), one
   of them is woken via the semaphore.  Returns 0 on success, 1 if
   ReleaseSemaphore fails.  */
int
__gthr_win32_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
{
--(mutex->depth);
if (mutex->depth == 0)
{
mutex->owner = 0;
if (InterlockedDecrement (&mutex->counter) >= 0)
return ReleaseSemaphore (mutex->sema, 1, NULL) ? 0 : 1;
}
return 0;
}
2004-08-27 Jason Merrill <jason@redhat.com>
PR c++/13684
* decl.c (expand_static_init): Use thread-safety API.
(register_dtor_fn): Return the call, don't expand it.
* tree.c (add_stmt_to_compound): New fn.
(stabilize_call): Use it.
2004-08-27 Richard Henderson <rth@redhat.com> 2004-08-27 Richard Henderson <rth@redhat.com>
* cp-tree.def (OFFSETOF_EXPR): New. * cp-tree.def (OFFSETOF_EXPR): New.
......
...@@ -3801,7 +3801,7 @@ extern tree build_target_expr_with_type (tree, tree); ...@@ -3801,7 +3801,7 @@ extern tree build_target_expr_with_type (tree, tree);
extern int local_variable_p (tree); extern int local_variable_p (tree);
extern int nonstatic_local_decl_p (tree); extern int nonstatic_local_decl_p (tree);
extern tree declare_global_var (tree, tree); extern tree declare_global_var (tree, tree);
extern void register_dtor_fn (tree); extern tree register_dtor_fn (tree);
extern tmpl_spec_kind current_tmpl_spec_kind (int); extern tmpl_spec_kind current_tmpl_spec_kind (int);
extern tree cp_fname_init (const char *, tree *); extern tree cp_fname_init (const char *, tree *);
extern tree builtin_function (const char *name, tree type, extern tree builtin_function (const char *name, tree type,
...@@ -4198,6 +4198,7 @@ extern void lang_check_failed (const char *, int, ...@@ -4198,6 +4198,7 @@ extern void lang_check_failed (const char *, int,
extern tree stabilize_expr (tree, tree *); extern tree stabilize_expr (tree, tree *);
extern void stabilize_call (tree, tree *); extern void stabilize_call (tree, tree *);
extern bool stabilize_init (tree, tree *); extern bool stabilize_init (tree, tree *);
extern tree add_stmt_to_compound (tree, tree);
extern tree cxx_maybe_build_cleanup (tree); extern tree cxx_maybe_build_cleanup (tree);
extern void init_tree (void); extern void init_tree (void);
extern int pod_type_p (tree); extern int pod_type_p (tree);
......
...@@ -5081,7 +5081,7 @@ end_cleanup_fn (void) ...@@ -5081,7 +5081,7 @@ end_cleanup_fn (void)
/* Generate code to handle the destruction of DECL, an object with /* Generate code to handle the destruction of DECL, an object with
static storage duration. */ static storage duration. */
void tree
register_dtor_fn (tree decl) register_dtor_fn (tree decl)
{ {
tree cleanup; tree cleanup;
...@@ -5090,7 +5090,7 @@ register_dtor_fn (tree decl) ...@@ -5090,7 +5090,7 @@ register_dtor_fn (tree decl)
tree fcall; tree fcall;
if (TYPE_HAS_TRIVIAL_DESTRUCTOR (TREE_TYPE (decl))) if (TYPE_HAS_TRIVIAL_DESTRUCTOR (TREE_TYPE (decl)))
return; return void_zero_node;
/* Call build_cleanup before we enter the anonymous function so that /* Call build_cleanup before we enter the anonymous function so that
any access checks will be done relative to the current scope, any access checks will be done relative to the current scope,
...@@ -5129,7 +5129,7 @@ register_dtor_fn (tree decl) ...@@ -5129,7 +5129,7 @@ register_dtor_fn (tree decl)
} }
else else
args = tree_cons (NULL_TREE, cleanup, NULL_TREE); args = tree_cons (NULL_TREE, cleanup, NULL_TREE);
finish_expr_stmt (build_function_call (get_atexit_node (), args)); return build_function_call (get_atexit_node (), args);
} }
/* DECL is a VAR_DECL with static storage duration. INIT, if present, /* DECL is a VAR_DECL with static storage duration. INIT, if present,
...@@ -5151,36 +5151,42 @@ expand_static_init (tree decl, tree init) ...@@ -5151,36 +5151,42 @@ expand_static_init (tree decl, tree init)
if (DECL_FUNCTION_SCOPE_P (decl)) if (DECL_FUNCTION_SCOPE_P (decl))
{ {
/* Emit code to perform this initialization but once. */ /* Emit code to perform this initialization but once. */
tree if_stmt; tree if_stmt, inner_if_stmt;
tree then_clause; tree then_clause, inner_then_clause;
tree assignment; tree guard, guard_addr, guard_addr_list;
tree guard; tree acquire_fn, release_fn, abort_fn;
tree guard_init; tree flag, begin;
/* Emit code to perform this initialization but once. This code /* Emit code to perform this initialization but once. This code
looks like: looks like:
static int guard = 0; static <type> guard;
if (!guard) { if (!guard.first_byte) {
// Do initialization. if (__cxa_guard_acquire (&guard)) {
guard = 1; bool flag = false;
// Register variable for destruction at end of program. try {
// Do initialization.
flag = true; __cxa_guard_release (&guard);
// Register variable for destruction at end of program.
} catch {
if (!flag) __cxa_guard_abort (&guard);
}
} }
Note that the `temp' variable is only set to 1 *after* the Note that the `flag' variable is only set to 1 *after* the
initialization is complete. This ensures that an exception, initialization is complete. This ensures that an exception,
thrown during the construction, will cause the variable to thrown during the construction, will cause the variable to
reinitialized when we pass through this code again, as per: reinitialized when we pass through this code again, as per:
[stmt.dcl] [stmt.dcl]
If the initialization exits by throwing an exception, the If the initialization exits by throwing an exception, the
initialization is not complete, so it will be tried again initialization is not complete, so it will be tried again
the next time control enters the declaration. the next time control enters the declaration.
In theory, this process should be thread-safe, too; multiple This process should be thread-safe, too; multiple threads
threads should not be able to initialize the variable more should not be able to initialize the variable more than
than once. We don't yet attempt to ensure thread-safety. */ once. */
/* Create the guard variable. */ /* Create the guard variable. */
guard = get_guard (decl); guard = get_guard (decl);
...@@ -5188,29 +5194,68 @@ expand_static_init (tree decl, tree init) ...@@ -5188,29 +5194,68 @@ expand_static_init (tree decl, tree init)
/* Begin the conditional initialization. */ /* Begin the conditional initialization. */
if_stmt = begin_if_stmt (); if_stmt = begin_if_stmt ();
finish_if_stmt_cond (get_guard_cond (guard), if_stmt); finish_if_stmt_cond (get_guard_cond (guard), if_stmt);
then_clause = begin_compound_stmt (0); then_clause = begin_compound_stmt (BCS_NO_SCOPE);
/* Do the initialization itself. */ if (flag_threadsafe_statics)
assignment = init ? init : NULL_TREE; {
guard_addr = build_address (guard);
/* Once the assignment is complete, set TEMP to 1. Since the guard_addr_list = build_tree_list (NULL_TREE, guard_addr);
construction of the static object is complete at this point,
we want to make sure TEMP is set to 1 even if a temporary acquire_fn = get_identifier ("__cxa_guard_acquire");
constructed during the initialization throws an exception release_fn = get_identifier ("__cxa_guard_release");
when it is destroyed. So, we combine the initialization and abort_fn = get_identifier ("__cxa_guard_abort");
the assignment to TEMP into a single expression, ensuring if (!get_global_value_if_present (acquire_fn, &acquire_fn))
that when we call finish_expr_stmt the cleanups will not be {
run until after TEMP is set to 1. */ tree argtypes = tree_cons (NULL_TREE, TREE_TYPE (guard_addr),
guard_init = set_guard (guard); void_list_node);
if (assignment) tree vfntype = build_function_type (void_type_node, argtypes);
assignment = build_compound_expr (assignment, guard_init); acquire_fn = push_library_fn
(acquire_fn, build_function_type (integer_type_node, argtypes));
release_fn = push_library_fn (release_fn, vfntype);
abort_fn = push_library_fn (abort_fn, vfntype);
}
else
{
release_fn = identifier_global_value (release_fn);
abort_fn = identifier_global_value (abort_fn);
}
inner_if_stmt = begin_if_stmt ();
finish_if_stmt_cond (build_call (acquire_fn, guard_addr_list),
inner_if_stmt);
inner_then_clause = begin_compound_stmt (BCS_NO_SCOPE);
begin = get_target_expr (boolean_false_node);
flag = TARGET_EXPR_SLOT (begin);
TARGET_EXPR_CLEANUP (begin)
= build (COND_EXPR, void_type_node, flag,
void_zero_node,
build_call (abort_fn, guard_addr_list));
CLEANUP_EH_ONLY (begin) = 1;
/* Do the initialization itself. */
init = add_stmt_to_compound (begin, init);
init = add_stmt_to_compound
(init, build (MODIFY_EXPR, void_type_node, flag, boolean_true_node));
init = add_stmt_to_compound
(init, build_call (release_fn, guard_addr_list));
}
else else
assignment = guard_init; init = add_stmt_to_compound (init, set_guard (guard));
finish_expr_stmt (assignment);
/* Use atexit to register a function for destroying this static /* Use atexit to register a function for destroying this static
variable. */ variable. */
register_dtor_fn (decl); init = add_stmt_to_compound (init, register_dtor_fn (decl));
finish_expr_stmt (init);
if (flag_threadsafe_statics)
{
finish_compound_stmt (inner_then_clause);
finish_then_clause (inner_if_stmt);
finish_if_stmt (inner_if_stmt);
}
finish_compound_stmt (then_clause); finish_compound_stmt (then_clause);
finish_then_clause (if_stmt); finish_then_clause (if_stmt);
......
...@@ -2446,7 +2446,7 @@ do_static_initialization (tree decl, tree init) ...@@ -2446,7 +2446,7 @@ do_static_initialization (tree decl, tree init)
/* If we're using __cxa_atexit, register a a function that calls the /* If we're using __cxa_atexit, register a a function that calls the
destructor for the object. */ destructor for the object. */
if (flag_use_cxa_atexit) if (flag_use_cxa_atexit)
register_dtor_fn (decl); finish_expr_stmt (register_dtor_fn (decl));
/* Finish up. */ /* Finish up. */
finish_static_initialization_or_destruction (guard_if_stmt); finish_static_initialization_or_destruction (guard_if_stmt);
......
...@@ -2254,6 +2254,19 @@ stabilize_expr (tree exp, tree* initp) ...@@ -2254,6 +2254,19 @@ stabilize_expr (tree exp, tree* initp)
return exp; return exp;
} }
/* Add NEW, an expression whose value we don't care about, after the
similar expression ORIG. */
tree
add_stmt_to_compound (tree orig, tree new)
{
/* A null or side-effect-free statement contributes nothing: return
   the other operand unchanged so callers can chain statements without
   building useless compounds.  */
if (!new || !TREE_SIDE_EFFECTS (new))
return orig;
if (!orig || !TREE_SIDE_EFFECTS (orig))
return new;
/* Both are real statements: sequence them, ORIG first, for value
   void.  */
return build2 (COMPOUND_EXPR, void_type_node, orig, new);
}
/* Like stabilize_expr, but for a call whose args we want to /* Like stabilize_expr, but for a call whose args we want to
pre-evaluate. */ pre-evaluate. */
...@@ -2275,12 +2288,7 @@ stabilize_call (tree call, tree *initp) ...@@ -2275,12 +2288,7 @@ stabilize_call (tree call, tree *initp)
{ {
tree init; tree init;
TREE_VALUE (t) = stabilize_expr (TREE_VALUE (t), &init); TREE_VALUE (t) = stabilize_expr (TREE_VALUE (t), &init);
if (!init) inits = add_stmt_to_compound (inits, init);
/* Nothing. */;
else if (inits)
inits = build2 (COMPOUND_EXPR, void_type_node, inits, init);
else
inits = init;
} }
*initp = inits; *initp = inits;
......
...@@ -182,7 +182,7 @@ in the following sections. ...@@ -182,7 +182,7 @@ in the following sections.
-fno-nonansi-builtins -fno-operator-names @gol -fno-nonansi-builtins -fno-operator-names @gol
-fno-optional-diags -fpermissive @gol -fno-optional-diags -fpermissive @gol
-frepo -fno-rtti -fstats -ftemplate-depth-@var{n} @gol -frepo -fno-rtti -fstats -ftemplate-depth-@var{n} @gol
-fuse-cxa-atexit -fno-weak -nostdinc++ @gol -fno-threadsafe-statics -fuse-cxa-atexit -fno-weak -nostdinc++ @gol
-fno-default-inline -fvisibility-inlines-hidden @gol -fno-default-inline -fvisibility-inlines-hidden @gol
-Wabi -Wctor-dtor-privacy @gol -Wabi -Wctor-dtor-privacy @gol
-Wnon-virtual-dtor -Wreorder @gol -Wnon-virtual-dtor -Wreorder @gol
...@@ -1470,6 +1470,13 @@ A limit on the template instantiation depth is needed to detect ...@@ -1470,6 +1470,13 @@ A limit on the template instantiation depth is needed to detect
endless recursions during template class instantiation. ANSI/ISO C++ endless recursions during template class instantiation. ANSI/ISO C++
conforming programs must not rely on a maximum depth greater than 17. conforming programs must not rely on a maximum depth greater than 17.
@item -fno-threadsafe-statics
@opindex fno-threadsafe-statics
Do not emit the extra code to use the routines specified in the C++
ABI for thread-safe initialization of local statics. You can use this
option to reduce code size slightly in code that doesn't need to be
thread-safe.
@item -fuse-cxa-atexit @item -fuse-cxa-atexit
@opindex fuse-cxa-atexit @opindex fuse-cxa-atexit
Register destructors for objects with static storage duration with the Register destructors for objects with static storage duration with the
......
...@@ -3271,9 +3271,15 @@ gimplify_cleanup_point_expr (tree *expr_p, tree *pre_p) ...@@ -3271,9 +3271,15 @@ gimplify_cleanup_point_expr (tree *expr_p, tree *pre_p)
else else
{ {
tree sl, tfe; tree sl, tfe;
enum tree_code code;
if (CLEANUP_EH_ONLY (wce))
code = TRY_CATCH_EXPR;
else
code = TRY_FINALLY_EXPR;
sl = tsi_split_statement_list_after (&iter); sl = tsi_split_statement_list_after (&iter);
tfe = build (TRY_FINALLY_EXPR, void_type_node, sl, NULL_TREE); tfe = build (code, void_type_node, sl, NULL_TREE);
append_to_statement_list (TREE_OPERAND (wce, 0), append_to_statement_list (TREE_OPERAND (wce, 0),
&TREE_OPERAND (tfe, 1)); &TREE_OPERAND (tfe, 1));
*wce_p = tfe; *wce_p = tfe;
...@@ -3301,7 +3307,7 @@ gimplify_cleanup_point_expr (tree *expr_p, tree *pre_p) ...@@ -3301,7 +3307,7 @@ gimplify_cleanup_point_expr (tree *expr_p, tree *pre_p)
is the cleanup action required. */ is the cleanup action required. */
static void static void
gimple_push_cleanup (tree var, tree cleanup, tree *pre_p) gimple_push_cleanup (tree var, tree cleanup, bool eh_only, tree *pre_p)
{ {
tree wce; tree wce;
...@@ -3352,6 +3358,7 @@ gimple_push_cleanup (tree var, tree cleanup, tree *pre_p) ...@@ -3352,6 +3358,7 @@ gimple_push_cleanup (tree var, tree cleanup, tree *pre_p)
else else
{ {
wce = build (WITH_CLEANUP_EXPR, void_type_node, cleanup); wce = build (WITH_CLEANUP_EXPR, void_type_node, cleanup);
CLEANUP_EH_ONLY (wce) = eh_only;
append_to_statement_list (wce, pre_p); append_to_statement_list (wce, pre_p);
} }
...@@ -3399,7 +3406,8 @@ gimplify_target_expr (tree *expr_p, tree *pre_p, tree *post_p) ...@@ -3399,7 +3406,8 @@ gimplify_target_expr (tree *expr_p, tree *pre_p, tree *post_p)
if (TARGET_EXPR_CLEANUP (targ)) if (TARGET_EXPR_CLEANUP (targ))
{ {
gimplify_stmt (&TARGET_EXPR_CLEANUP (targ)); gimplify_stmt (&TARGET_EXPR_CLEANUP (targ));
gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ), pre_p); gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ),
CLEANUP_EH_ONLY (targ), pre_p);
} }
/* Only expand this once. */ /* Only expand this once. */
......
/* Threads compatibility routines for libgcc2 and libobjc. */
/* Compile this one with gcc. */ /* Compile this one with gcc. */
/* Copyright (C) 1997, 1999, 2000, 2001 Free Software Foundation, Inc. /* Copyright (C) 1997, 1999, 2000, 2001 Free Software Foundation, Inc.
...@@ -52,10 +52,12 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA ...@@ -52,10 +52,12 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
typedef pthread_key_t __gthread_key_t; typedef pthread_key_t __gthread_key_t;
typedef pthread_once_t __gthread_once_t; typedef pthread_once_t __gthread_once_t;
typedef pthread_mutex_t __gthread_mutex_t; typedef pthread_mutex_t __gthread_mutex_t;
typedef pthread_mutex_t __gthread_recursive_mutex_t;
#define __GTHREAD_ONCE_INIT pthread_once_init #define __GTHREAD_ONCE_INIT pthread_once_init
#define __GTHREAD_MUTEX_INIT_FUNCTION __gthread_mutex_init_function #define __GTHREAD_MUTEX_INIT_FUNCTION __gthread_mutex_init_function
#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init_function
#define __GTHREAD_MUTEX_INIT_DEFAULT pthread_once_init #define __GTHREAD_MUTEX_INIT_DEFAULT pthread_once_init
...@@ -481,6 +483,43 @@ __gthread_mutex_unlock (__gthread_mutex_t *mutex) ...@@ -481,6 +483,43 @@ __gthread_mutex_unlock (__gthread_mutex_t *mutex)
return 0; return 0;
} }
/* Initialize MUTEX as a recursive mutex using the DCE-threads
   attribute API.  Returns 0 on success, or the first failing
   pthread_* error code.  */
static inline int
__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *mutex)
{
  if (__gthread_active_p ())
    {
      pthread_mutexattr_t attr;
      int r;

      r = pthread_mutexattr_create (&attr);
      if (!r)
	r = pthread_mutexattr_setkind_np (&attr, MUTEX_RECURSIVE_NP);
      if (!r)
	r = pthread_mutex_init (mutex, attr);
      if (!r)
	r = pthread_mutexattr_delete (&attr);
      return r;
    }
  /* Single-threaded: nothing to initialize.  Report success rather
     than falling off the end of a non-void function (which previously
     yielded an undefined return value).  */
  return 0;
}
/* The recursion behavior was configured at init time (the mutex was
   created with kind MUTEX_RECURSIVE_NP above), so the recursive entry
   points simply defer to the plain mutex primitives.  */
static inline int
__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
{
return __gthread_mutex_lock (mutex);
}

/* Non-blocking acquire; same deferral as above.  */
static inline int
__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
{
return __gthread_mutex_trylock (mutex);
}

/* Release one recursion level.  */
static inline int
__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
{
return __gthread_mutex_unlock (mutex);
}
#endif /* _LIBOBJC */ #endif /* _LIBOBJC */
#undef UNUSED #undef UNUSED
......
...@@ -46,9 +46,17 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA ...@@ -46,9 +46,17 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
typedef pthread_key_t __gthread_key_t; typedef pthread_key_t __gthread_key_t;
typedef pthread_once_t __gthread_once_t; typedef pthread_once_t __gthread_once_t;
typedef pthread_mutex_t __gthread_mutex_t; typedef pthread_mutex_t __gthread_mutex_t;
typedef pthread_mutex_t __gthread_recursive_mutex_t;
#define __GTHREAD_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER #define __GTHREAD_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
#define __GTHREAD_ONCE_INIT PTHREAD_ONCE_INIT #define __GTHREAD_ONCE_INIT PTHREAD_ONCE_INIT
#if defined(PTHREAD_RECURSIVE_MUTEX_INITIALIZER)
#define __GTHREAD_RECURSIVE_MUTEX_INIT PTHREAD_RECURSIVE_MUTEX_INITIALIZER
#elif defined(PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP)
#define __GTHREAD_RECURSIVE_MUTEX_INIT PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
#else
#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init_function
#endif
#if SUPPORTS_WEAK && GTHREAD_USE_WEAK #if SUPPORTS_WEAK && GTHREAD_USE_WEAK
...@@ -516,6 +524,45 @@ __gthread_mutex_unlock (__gthread_mutex_t *mutex) ...@@ -516,6 +524,45 @@ __gthread_mutex_unlock (__gthread_mutex_t *mutex)
return 0; return 0;
} }
#ifndef PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
/* Initialize MUTEX as a recursive pthread mutex (used only when no
   static PTHREAD_RECURSIVE_MUTEX_INITIALIZER* is available).  Returns
   0 on success, or the first failing pthread_* error code.  */
static inline int
__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *mutex)
{
  if (__gthread_active_p ())
    {
      pthread_mutexattr_t attr;
      int r;

      r = pthread_mutexattr_init (&attr);
      if (!r)
	r = pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
      if (!r)
	r = pthread_mutex_init (mutex, &attr);
      if (!r)
	r = pthread_mutexattr_destroy (&attr);
      return r;
    }
  /* Single-threaded: nothing to initialize.  Report success rather
     than falling off the end of a non-void function (which previously
     yielded an undefined return value).  */
  return 0;
}
#endif
/* The underlying pthread mutex is created with type
   PTHREAD_MUTEX_RECURSIVE (or a recursive static initializer), so the
   plain lock/trylock/unlock primitives already behave recursively and
   the recursive entry points simply delegate to them.  */
static inline int
__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
{
return __gthread_mutex_lock (mutex);
}

/* Non-blocking acquire; same delegation as above.  */
static inline int
__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
{
return __gthread_mutex_trylock (mutex);
}

/* Release one recursion level.  */
static inline int
__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
{
return __gthread_mutex_unlock (mutex);
}
#endif /* _LIBOBJC */ #endif /* _LIBOBJC */
#endif /* ! GCC_GTHR_POSIX_H */ #endif /* ! GCC_GTHR_POSIX_H */
...@@ -32,6 +32,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA ...@@ -32,6 +32,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
/* Just provide compatibility for mutex handling. */ /* Just provide compatibility for mutex handling. */
typedef int __gthread_mutex_t; typedef int __gthread_mutex_t;
typedef int __gthread_recursive_mutex_t;
#define __GTHREAD_MUTEX_INIT 0 #define __GTHREAD_MUTEX_INIT 0
...@@ -232,6 +233,24 @@ __gthread_mutex_unlock (__gthread_mutex_t * UNUSED(mutex)) ...@@ -232,6 +233,24 @@ __gthread_mutex_unlock (__gthread_mutex_t * UNUSED(mutex))
return 0; return 0;
} }
/* Single-threaded model: mutexes are plain ints and the plain-mutex
   operations are effectively no-ops, so the recursive entry points
   just delegate to them.  */
static inline int
__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
{
return __gthread_mutex_lock (mutex);
}

/* Non-blocking acquire; same delegation as above.  */
static inline int
__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
{
return __gthread_mutex_trylock (mutex);
}

/* Release one recursion level.  */
static inline int
__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
{
return __gthread_mutex_unlock (mutex);
}
#endif /* _LIBOBJC */ #endif /* _LIBOBJC */
#undef UNUSED #undef UNUSED
......
...@@ -44,9 +44,11 @@ typedef struct { ...@@ -44,9 +44,11 @@ typedef struct {
int once; int once;
} __gthread_once_t; } __gthread_once_t;
typedef mutex_t __gthread_mutex_t; typedef mutex_t __gthread_mutex_t;
typedef mutex_t __gthread_recursive_mutex_t;
#define __GTHREAD_ONCE_INIT { DEFAULTMUTEX, 0 } #define __GTHREAD_ONCE_INIT { DEFAULTMUTEX, 0 }
#define __GTHREAD_MUTEX_INIT DEFAULTMUTEX #define __GTHREAD_MUTEX_INIT DEFAULTMUTEX
#define __GTHREAD_RECURSIVE_MUTEX_INIT RECURSIVE_ERRORCHECKMUTEX
#if SUPPORTS_WEAK && GTHREAD_USE_WEAK #if SUPPORTS_WEAK && GTHREAD_USE_WEAK
...@@ -466,6 +468,24 @@ __gthread_mutex_unlock (__gthread_mutex_t *mutex) ...@@ -466,6 +468,24 @@ __gthread_mutex_unlock (__gthread_mutex_t *mutex)
return 0; return 0;
} }
/* __gthread_recursive_mutex_t is a mutex_t initialized with
   RECURSIVE_ERRORCHECKMUTEX (see __GTHREAD_RECURSIVE_MUTEX_INIT
   above), so the plain Solaris mutex primitives already handle
   recursion and the recursive entry points delegate to them.  */
static inline int
__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
{
return __gthread_mutex_lock (mutex);
}

/* Non-blocking acquire; same delegation as above.  */
static inline int
__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
{
return __gthread_mutex_trylock (mutex);
}

/* Release one recursion level.  */
static inline int
__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
{
return __gthread_mutex_unlock (mutex);
}
#endif /* _LIBOBJC */ #endif /* _LIBOBJC */
#endif /* ! GCC_GTHR_SOLARIS_H */ #endif /* ! GCC_GTHR_SOLARIS_H */
...@@ -45,7 +45,10 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA ...@@ -45,7 +45,10 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include <semLib.h> #include <semLib.h>
typedef SEM_ID __gthread_mutex_t; typedef SEM_ID __gthread_mutex_t;
/* All VxWorks mutexes are recursive. */
typedef SEM_ID __gthread_recursive_mutex_t;
#define __GTHREAD_MUTEX_INIT_FUNCTION __gthread_mutex_init_function #define __GTHREAD_MUTEX_INIT_FUNCTION __gthread_mutex_init_function
#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init_function
static inline void static inline void
__gthread_mutex_init_function (__gthread_mutex_t *mutex) __gthread_mutex_init_function (__gthread_mutex_t *mutex)
...@@ -71,6 +74,30 @@ __gthread_mutex_unlock (__gthread_mutex_t *mutex) ...@@ -71,6 +74,30 @@ __gthread_mutex_unlock (__gthread_mutex_t *mutex)
return semGive (*mutex); return semGive (*mutex);
} }
/* All VxWorks mutexes are recursive (see the typedef comment above),
   so every recursive operation is an alias for the plain one.  */
static inline void
__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *mutex)
{
__gthread_mutex_init_function (mutex);
}

/* Acquire MUTEX (recursion handled natively by semM).  */
static inline int
__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
{
return __gthread_mutex_lock (mutex);
}

/* Non-blocking acquire.  */
static inline int
__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
{
return __gthread_mutex_trylock (mutex);
}

/* Release one recursion level.  */
static inline int
__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
{
return __gthread_mutex_unlock (mutex);
}
/* pthread_once is complicated enough that it's implemented /* pthread_once is complicated enough that it's implemented
out-of-line. See config/vxlib.c. */ out-of-line. See config/vxlib.c. */
......
...@@ -343,9 +343,18 @@ typedef struct { ...@@ -343,9 +343,18 @@ typedef struct {
void *sema; void *sema;
} __gthread_mutex_t; } __gthread_mutex_t;
typedef struct {
long counter;
long depth;
DWORD owner;
void *sema;
} __gthread_recursive_mutex_t;
#define __GTHREAD_ONCE_INIT {0, -1} #define __GTHREAD_ONCE_INIT {0, -1}
#define __GTHREAD_MUTEX_INIT_FUNCTION __gthread_mutex_init_function #define __GTHREAD_MUTEX_INIT_FUNCTION __gthread_mutex_init_function
#define __GTHREAD_MUTEX_INIT_DEFAULT {-1, 0} #define __GTHREAD_MUTEX_INIT_DEFAULT {-1, 0}
#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_mutex_init_function
#define __GTHREAD_RECURSIVE_MUTEX_INIT_DEFAULT {-1, 0}
#if __MINGW32_MAJOR_VERSION >= 1 || \ #if __MINGW32_MAJOR_VERSION >= 1 || \
(__MINGW32_MAJOR_VERSION == 0 && __MINGW32_MINOR_VERSION > 2) (__MINGW32_MAJOR_VERSION == 0 && __MINGW32_MINOR_VERSION > 2)
...@@ -472,6 +481,33 @@ __gthread_mutex_unlock (__gthread_mutex_t *mutex) ...@@ -472,6 +481,33 @@ __gthread_mutex_unlock (__gthread_mutex_t *mutex)
return 0; return 0;
} }
/* __GTHREAD_HIDE_WIN32API variant: dispatch to the out-of-line
   helpers (defined in config/i386/gthr-win32.c), but only when
   threads are actually active; otherwise locking is a successful
   no-op.  */
static inline int
__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
{
if (__gthread_active_p ())
return __gthr_win32_recursive_mutex_lock (mutex);
else
return 0;
}

/* Non-blocking acquire; same dispatch as above.  */
static inline int
__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
{
if (__gthread_active_p ())
return __gthr_win32_recursive_mutex_trylock (mutex);
else
return 0;
}

/* Release one recursion level; same dispatch as above.  */
static inline int
__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
{
if (__gthread_active_p ())
return __gthr_win32_recursive_mutex_unlock (mutex);
else
return 0;
}
#else /* ! __GTHREAD_HIDE_WIN32API */ #else /* ! __GTHREAD_HIDE_WIN32API */
#include <windows.h> #include <windows.h>
...@@ -610,6 +646,83 @@ __gthread_mutex_unlock (__gthread_mutex_t *mutex) ...@@ -610,6 +646,83 @@ __gthread_mutex_unlock (__gthread_mutex_t *mutex)
return 0; return 0;
} }
/* One-time initialization of recursive MUTEX: counter == -1 means
   "free", no owner, zero recursion depth.  The semaphore is used to
   park contending threads (up to 65535 waiters).  */
static inline void
__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *mutex)
{
mutex->counter = -1;
mutex->depth = 0;
mutex->owner = 0;
mutex->sema = CreateSemaphore (NULL, 0, 65535, NULL);
}
/* Acquire MUTEX, allowing recursive acquisition by the owning thread.
   Returns 0 on success, 1 on failure; a successful no-op when the
   program is single-threaded.  counter is -1 while the mutex is free;
   the increment that takes it to 0 grants ownership without touching
   the semaphore.  */
static inline int
__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
{
  if (__gthread_active_p ())
    {
      DWORD me = GetCurrentThreadId();

      if (InterlockedIncrement (&mutex->counter) == 0)
	{
	  /* Mutex was free: we are the first locker.  */
	  mutex->depth = 1;
	  mutex->owner = me;
	}
      else if (mutex->owner == me)
	{
	  /* Recursive acquisition: we already hold the lock, so undo
	     the increment above (we must not count as a new waiter)
	     and just track the nesting depth.  The original code
	     referenced the nonexistent `mx->lock_idx' here, which
	     does not compile.  */
	  InterlockedDecrement (&mutex->counter);
	  ++(mutex->depth);
	}
      else if (WaitForSingleObject (mutex->sema, INFINITE) == WAIT_OBJECT_0)
	{
	  /* A previous owner released the mutex and woke us.  */
	  mutex->depth = 1;
	  mutex->owner = me;
	}
      else
	{
	  /* WaitForSingleObject returns WAIT_FAILED, and we can only do
	     some best-effort cleanup here.  */
	  InterlockedDecrement (&mutex->counter);
	  return 1;
	}
    }
  return 0;
}
/* Try to acquire MUTEX without blocking; the owning thread may
   re-acquire it recursively.  Returns 0 on success, 1 if the mutex is
   held by another thread.  */
static inline int
__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
{
  if (__gthread_active_p ())
    {
      DWORD self = GetCurrentThreadId();

      /* Atomically move the counter from -1 (free) to 0 (held).  */
      if (__GTHR_W32_InterlockedCompareExchange (&mutex->counter, 0, -1) < 0)
	{
	  mutex->owner = self;
	  mutex->depth = 1;
	}
      else if (mutex->owner == self)
	/* Already ours: just record another recursion level.  */
	++(mutex->depth);
      else
	/* Held by some other thread: fail without blocking.  */
	return 1;
    }
  return 0;
}
/* Release one recursion level of MUTEX; only the outermost unlock
   actually releases the lock and wakes a waiter.  */
static inline int
__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
{
  if (__gthread_active_p ())
    {
      /* Drop one level; inner recursive unlocks stop here.  */
      if (--(mutex->depth) == 0)
	{
	  mutex->owner = 0;

	  /* A still-non-negative counter after the decrement means at
	     least one thread is parked on the semaphore; wake one.  */
	  if (InterlockedDecrement (&mutex->counter) >= 0)
	    return ReleaseSemaphore (mutex->sema, 1, NULL) ? 0 : 1;
	}
    }
  return 0;
}
#endif /* __GTHREAD_HIDE_WIN32API */
#ifdef __cplusplus
......
@@ -42,6 +42,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
__gthread_key_t
__gthread_once_t
__gthread_mutex_t
__gthread_recursive_mutex_t
The threads interface must define the following macros:
@@ -56,6 +57,9 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
function which looks like this:
void __GTHREAD_MUTEX_INIT_FUNCTION (__gthread_mutex_t *)
Don't define __GTHREAD_MUTEX_INIT in this case
__GTHREAD_RECURSIVE_MUTEX_INIT
__GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION
as above, but for a recursive mutex.
The threads interface must define the following static functions:
@@ -71,6 +75,10 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
int __gthread_mutex_trylock (__gthread_mutex_t *mutex);
int __gthread_mutex_unlock (__gthread_mutex_t *mutex);
int __gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex);
int __gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex);
int __gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex);
All functions returning int should return zero on success or the error
number. If the operation is not supported, -1 is returned.
...
@@ -40,6 +40,9 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#define HAVE_DECL_GETOPT 1
#endif
/* We want everything from the glibc headers. */
#define _GNU_SOURCE 1
/* GCC supplies these headers. */
#include <stddef.h>
#include <float.h>
...
2004-08-27 Jason Merrill <jason@redhat.com>
PR c++/13684
* libsupc++/guard.cc (static_mutex): Internal class implementing a
recursive mutex which controls initialization of local statics.
(__gnu_cxx::recursive_init): New exception class.
(__cxa_guard_acquire): Deal with locking and recursion detection.
(acquire_1, __cxa_guard_abort, __cxa_guard_release): Likewise.
2004-08-27 Matthias Klose <doko@debian.org>
* configure.host: For mips*-*-linux* update cpu_include_dir
...
@@ -29,26 +29,154 @@
// Written by Mark Mitchell, CodeSourcery LLC, <mark@codesourcery.com>
#include <cxxabi.h>
#include <exception>
#include <bits/c++config.h>
#include <bits/gthr.h>
// The IA64/generic ABI uses the first byte of the guard variable.
// The ARM EABI uses the least significant bit.
// Thread-safe static local initialization support.
#ifdef __GTHREADS
namespace
{
// static_mutex is a single mutex controlling all static initializations.
// This is a static class--the need for a static initialization function
// to pass to __gthread_once precludes creating multiple instances, though
// I suppose you could achieve the same effect with a template.
class static_mutex
{
// The one recursive mutex shared by every guarded static initialization.
static __gthread_recursive_mutex_t mutex;
#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION
// Runtime initializer, used (via __gthread_once) only on targets whose
// gthreads port has no static recursive-mutex initializer macro.
static void init();
#endif
public:
static void lock();
static void unlock();
};
// Definition of the mutex; statically initialized when the port
// provides __GTHREAD_RECURSIVE_MUTEX_INIT.
__gthread_recursive_mutex_t static_mutex::mutex
#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
= __GTHREAD_RECURSIVE_MUTEX_INIT
#endif
;
#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION
void static_mutex::init()
{
__GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION (&mutex);
}
#endif
void static_mutex::lock()
{
#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION
// Ensure the mutex is initialized exactly once before first use.
static __gthread_once_t once = __GTHREAD_ONCE_INIT;
__gthread_once (&once, init);
#endif
__gthread_recursive_mutex_lock (&mutex);
}
void static_mutex::unlock ()
{
__gthread_recursive_mutex_unlock (&mutex);
}
}
#endif
namespace __gnu_cxx
{
// 6.7[stmt.dcl]/4: If control re-enters the declaration (recursively)
// while the object is being initialized, the behavior is undefined.
// Since we already have a library function to handle locking, we might
// as well check for this situation and throw an exception.
// We use the second byte of the guard variable to remember that we're
// in the middle of an initialization.
class recursive_init: public std::exception
{
public:
recursive_init() throw() { }
virtual ~recursive_init() throw ();
};
// Out-of-line destructor definition (keeps the definition in this
// translation unit rather than in every user of the header).
recursive_init::~recursive_init() throw() { }
}
namespace __cxxabiv1
{
// Perform the actual guard acquisition, without any locking.  Returns 1
// if the caller should run the initialization, 0 if it has already been
// done.  Detects recursive initialization via the guard's second byte
// and reports it per 6.7 [stmt.dcl]/4.
static int
acquire_1 (__guard *g)
{
if (_GLIBCXX_GUARD_ACQUIRE (g))
{
// The second byte marks an initialization in progress; if it was
// already nonzero, control re-entered this declaration recursively.
if (((char *)g)[1]++)
{
#ifdef __EXCEPTIONS
throw __gnu_cxx::recursive_init();
#else
abort ();
#endif
}
return 1;
}
return 0;
}
// ABI entry point: acquire the guard for a local static.  Returns 1 if
// the caller must run the initialization (in which case the guard mutex
// stays held until __cxa_guard_release/__cxa_guard_abort), 0 if the
// object is already initialized.
// (The rendered diff had merged the pre-patch one-line body into these
// lines; this is the clean post-patch definition.)
extern "C"
int __cxa_guard_acquire (__guard *g)
{
#ifdef __GTHREADS
  if (__gthread_active_p ())
    {
      // Simple wrapper for exception safety.
      struct mutex_wrapper
      {
	bool unlock;
	mutex_wrapper (): unlock(true)
	{
	  static_mutex::lock ();
	}
	~mutex_wrapper ()
	{
	  if (unlock)
	    static_mutex::unlock ();
	}
      } mw;

      if (acquire_1 (g))
	{
	  // We run the initializer: keep the mutex held; the matching
	  // release/abort call will unlock it.
	  mw.unlock = false;
	  return 1;
	}

      return 0;
    }
#endif

  // Single-threaded (or threads not active): no locking needed.
  return acquire_1 (g);
}
extern "C" extern "C"
void __cxa_guard_release (__guard *g) void __cxa_guard_abort (__guard *g)
{ {
_GLIBCXX_GUARD_RELEASE (g); ((char *)g)[1]--;
#ifdef __GTHREADS
if (__gthread_active_p ())
static_mutex::unlock ();
#endif
} }
extern "C" extern "C"
void __cxa_guard_abort (__guard *) void __cxa_guard_release (__guard *g)
{ {
((char *)g)[1]--;
_GLIBCXX_GUARD_RELEASE (g);
#ifdef __GTHREADS
if (__gthread_active_p ())
static_mutex::unlock ();
#endif
} }
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment