Commit 48ae6c13 by Richard Henderson

re PR middle-end/14311 (builtins for atomic operations needed)

	PR middle-end/14311
	* builtin-types.def (BT_BOOL, BT_VOLATILE_PTR, BT_I1, BT_I2,
	BT_I4, BT_I8, BT_FN_VOID_VPTR, BT_FN_I1_VPTR_I1, BT_FN_I2_VPTR_I2,
	BT_FN_I4_VPTR_I4, BT_FN_I8_VPTR_I8, BT_FN_BOOL_VPTR_I1_I1,
	BT_FN_BOOL_VPTR_I2_I2, BT_FN_BOOL_VPTR_I4_I4, BT_FN_BOOL_VPTR_I8_I8,
	BT_FN_I1_VPTR_I1_I1, BT_FN_I2_VPTR_I2_I2, BT_FN_I4_VPTR_I4_I4,
	BT_FN_I8_VPTR_I8_I8): New.
	* builtins.def (DEF_SYNC_BUILTIN): New.
	(BUILT_IN_FETCH_AND_ADD_N, BUILT_IN_FETCH_AND_ADD_1,
	BUILT_IN_FETCH_AND_ADD_2, BUILT_IN_FETCH_AND_ADD_4,
	BUILT_IN_FETCH_AND_ADD_8, BUILT_IN_FETCH_AND_SUB_N,
	BUILT_IN_FETCH_AND_SUB_1, BUILT_IN_FETCH_AND_SUB_2,
	BUILT_IN_FETCH_AND_SUB_4, BUILT_IN_FETCH_AND_SUB_8,
	BUILT_IN_FETCH_AND_OR_N, BUILT_IN_FETCH_AND_OR_1,
	BUILT_IN_FETCH_AND_OR_2, BUILT_IN_FETCH_AND_OR_4,
	BUILT_IN_FETCH_AND_OR_8, BUILT_IN_FETCH_AND_AND_N,
	BUILT_IN_FETCH_AND_AND_1, BUILT_IN_FETCH_AND_AND_2,
	BUILT_IN_FETCH_AND_AND_4, BUILT_IN_FETCH_AND_AND_8,
	BUILT_IN_FETCH_AND_XOR_N, BUILT_IN_FETCH_AND_XOR_1,
	BUILT_IN_FETCH_AND_XOR_2, BUILT_IN_FETCH_AND_XOR_4,
	BUILT_IN_FETCH_AND_XOR_8, BUILT_IN_FETCH_AND_NAND_N,
	BUILT_IN_FETCH_AND_NAND_1, BUILT_IN_FETCH_AND_NAND_2,
	BUILT_IN_FETCH_AND_NAND_4, BUILT_IN_FETCH_AND_NAND_8,
	BUILT_IN_ADD_AND_FETCH_N, BUILT_IN_ADD_AND_FETCH_1,
	BUILT_IN_ADD_AND_FETCH_2, BUILT_IN_ADD_AND_FETCH_4,
	BUILT_IN_ADD_AND_FETCH_8, BUILT_IN_SUB_AND_FETCH_N,
	BUILT_IN_SUB_AND_FETCH_1, BUILT_IN_SUB_AND_FETCH_2,
	BUILT_IN_SUB_AND_FETCH_4, BUILT_IN_SUB_AND_FETCH_8,
	BUILT_IN_OR_AND_FETCH_N, BUILT_IN_OR_AND_FETCH_1,
	BUILT_IN_OR_AND_FETCH_2, BUILT_IN_OR_AND_FETCH_4,
	BUILT_IN_OR_AND_FETCH_8, BUILT_IN_AND_AND_FETCH_N,
	BUILT_IN_AND_AND_FETCH_1, BUILT_IN_AND_AND_FETCH_2,
	BUILT_IN_AND_AND_FETCH_4, BUILT_IN_AND_AND_FETCH_8,
	BUILT_IN_XOR_AND_FETCH_N, BUILT_IN_XOR_AND_FETCH_1,
	BUILT_IN_XOR_AND_FETCH_2, BUILT_IN_XOR_AND_FETCH_4,
	BUILT_IN_XOR_AND_FETCH_8, BUILT_IN_NAND_AND_FETCH_N,
	BUILT_IN_NAND_AND_FETCH_1, BUILT_IN_NAND_AND_FETCH_2,
	BUILT_IN_NAND_AND_FETCH_4, BUILT_IN_NAND_AND_FETCH_8,
	BUILT_IN_BOOL_COMPARE_AND_SWAP_N, BUILT_IN_BOOL_COMPARE_AND_SWAP_1,
	BUILT_IN_BOOL_COMPARE_AND_SWAP_2, BUILT_IN_BOOL_COMPARE_AND_SWAP_4,
	BUILT_IN_BOOL_COMPARE_AND_SWAP_8, BUILT_IN_VAL_COMPARE_AND_SWAP_N,
	BUILT_IN_VAL_COMPARE_AND_SWAP_1, BUILT_IN_VAL_COMPARE_AND_SWAP_2,
	BUILT_IN_VAL_COMPARE_AND_SWAP_4, BUILT_IN_VAL_COMPARE_AND_SWAP_8,
	BUILT_IN_LOCK_TEST_AND_SET_N, BUILT_IN_LOCK_TEST_AND_SET_1,
	BUILT_IN_LOCK_TEST_AND_SET_2, BUILT_IN_LOCK_TEST_AND_SET_4,
	BUILT_IN_LOCK_TEST_AND_SET_8, BUILT_IN_LOCK_RELEASE_N,
	BUILT_IN_LOCK_RELEASE_1, BUILT_IN_LOCK_RELEASE_2,
	BUILT_IN_LOCK_RELEASE_4, BUILT_IN_LOCK_RELEASE_8,
	BUILT_IN_SYNCHRONIZE: New.
	* builtins.c (called_as_built_in): Rewrite from CALLED_AS_BUILT_IN
	as a function.  Accept __sync_ as a prefix as well.
	(expand_builtin_sync_operation, expand_builtin_compare_and_swap,
	expand_builtin_lock_test_and_set, expand_builtin_synchronize,
	expand_builtin_lock_release): New.
	(expand_builtin): Call them.
	* c-common.c (DEF_BUILTIN): Don't require __builtin_ prefix if
	neither BOTH_P nor FALLBACK_P are defined.
	(builtin_type_for_size): New.
	(sync_resolve_size, sync_resolve_params, sync_resolve_return): New.
	(resolve_overloaded_builtin): New.
	* c-common.h (resolve_overloaded_builtin): Declare.
	(builtin_type_for_size): Declare.
	* c-typeck.c (build_function_call): Invoke resolve_overloaded_builtin.
	* expr.c (sync_add_optab, sync_sub_optab, sync_ior_optab,
	sync_and_optab, sync_xor_optab, sync_nand_optab, sync_old_add_optab,
	sync_old_sub_optab, sync_old_ior_optab, sync_old_and_optab,
	sync_old_xor_optab, sync_old_nand_optab, sync_new_add_optab,
	sync_new_sub_optab, sync_new_ior_optab, sync_new_and_optab,
	sync_new_xor_optab, sync_new_nand_optab, sync_compare_and_swap,
	sync_compare_and_swap_cc, sync_lock_test_and_set,
	sync_lock_release): New.
	* optabs.h: Declare them.
	* expr.h (expand_val_compare_and_swap, expand_bool_compare_and_swap,
	expand_sync_operation, expand_sync_fetch_operation,
	expand_sync_lock_test_and_set): Declare.
	* genopinit.c (optabs): Add sync optabs.
	* optabs.c (init_optabs): Initialize sync optabs.
	(expand_val_compare_and_swap_1, expand_val_compare_and_swap,
	expand_bool_compare_and_swap, expand_compare_and_swap_loop,
	expand_sync_operation, expand_sync_fetch_operation,
	expand_sync_lock_test_and_set): New.
	* doc/extend.texi (Atomic Builtins): New section
	* doc/md.texi (Standard Names): Add sync patterns.

From-SVN: r98154
parent 871ae772
2004-04-14 Richard Henderson <rth@redhat.com>
PR middle-end/14311
* builtin-types.def (BT_BOOL, BT_VOLATILE_PTR, BT_I1, BT_I2,
BT_I4, BT_I8, BT_FN_VOID_VPTR, BT_FN_I1_VPTR_I1, BT_FN_I2_VPTR_I2,
BT_FN_I4_VPTR_I4, BT_FN_I8_VPTR_I8, BT_FN_BOOL_VPTR_I1_I1,
BT_FN_BOOL_VPTR_I2_I2, BT_FN_BOOL_VPTR_I4_I4, BT_FN_BOOL_VPTR_I8_I8,
BT_FN_I1_VPTR_I1_I1, BT_FN_I2_VPTR_I2_I2, BT_FN_I4_VPTR_I4_I4,
BT_FN_I8_VPTR_I8_I8): New.
* builtins.def (DEF_SYNC_BUILTIN): New.
(BUILT_IN_FETCH_AND_ADD_N, BUILT_IN_FETCH_AND_ADD_1,
BUILT_IN_FETCH_AND_ADD_2, BUILT_IN_FETCH_AND_ADD_4,
BUILT_IN_FETCH_AND_ADD_8, BUILT_IN_FETCH_AND_SUB_N,
BUILT_IN_FETCH_AND_SUB_1, BUILT_IN_FETCH_AND_SUB_2,
BUILT_IN_FETCH_AND_SUB_4, BUILT_IN_FETCH_AND_SUB_8,
BUILT_IN_FETCH_AND_OR_N, BUILT_IN_FETCH_AND_OR_1,
BUILT_IN_FETCH_AND_OR_2, BUILT_IN_FETCH_AND_OR_4,
BUILT_IN_FETCH_AND_OR_8, BUILT_IN_FETCH_AND_AND_N,
BUILT_IN_FETCH_AND_AND_1, BUILT_IN_FETCH_AND_AND_2,
BUILT_IN_FETCH_AND_AND_4, BUILT_IN_FETCH_AND_AND_8,
BUILT_IN_FETCH_AND_XOR_N, BUILT_IN_FETCH_AND_XOR_1,
BUILT_IN_FETCH_AND_XOR_2, BUILT_IN_FETCH_AND_XOR_4,
BUILT_IN_FETCH_AND_XOR_8, BUILT_IN_FETCH_AND_NAND_N,
BUILT_IN_FETCH_AND_NAND_1, BUILT_IN_FETCH_AND_NAND_2,
BUILT_IN_FETCH_AND_NAND_4, BUILT_IN_FETCH_AND_NAND_8,
BUILT_IN_ADD_AND_FETCH_N, BUILT_IN_ADD_AND_FETCH_1,
BUILT_IN_ADD_AND_FETCH_2, BUILT_IN_ADD_AND_FETCH_4,
BUILT_IN_ADD_AND_FETCH_8, BUILT_IN_SUB_AND_FETCH_N,
BUILT_IN_SUB_AND_FETCH_1, BUILT_IN_SUB_AND_FETCH_2,
BUILT_IN_SUB_AND_FETCH_4, BUILT_IN_SUB_AND_FETCH_8,
BUILT_IN_OR_AND_FETCH_N, BUILT_IN_OR_AND_FETCH_1,
BUILT_IN_OR_AND_FETCH_2, BUILT_IN_OR_AND_FETCH_4,
BUILT_IN_OR_AND_FETCH_8, BUILT_IN_AND_AND_FETCH_N,
BUILT_IN_AND_AND_FETCH_1, BUILT_IN_AND_AND_FETCH_2,
BUILT_IN_AND_AND_FETCH_4, BUILT_IN_AND_AND_FETCH_8,
BUILT_IN_XOR_AND_FETCH_N, BUILT_IN_XOR_AND_FETCH_1,
BUILT_IN_XOR_AND_FETCH_2, BUILT_IN_XOR_AND_FETCH_4,
BUILT_IN_XOR_AND_FETCH_8, BUILT_IN_NAND_AND_FETCH_N,
BUILT_IN_NAND_AND_FETCH_1, BUILT_IN_NAND_AND_FETCH_2,
BUILT_IN_NAND_AND_FETCH_4, BUILT_IN_NAND_AND_FETCH_8,
BUILT_IN_BOOL_COMPARE_AND_SWAP_N, BUILT_IN_BOOL_COMPARE_AND_SWAP_1,
BUILT_IN_BOOL_COMPARE_AND_SWAP_2, BUILT_IN_BOOL_COMPARE_AND_SWAP_4,
BUILT_IN_BOOL_COMPARE_AND_SWAP_8, BUILT_IN_VAL_COMPARE_AND_SWAP_N,
BUILT_IN_VAL_COMPARE_AND_SWAP_1, BUILT_IN_VAL_COMPARE_AND_SWAP_2,
BUILT_IN_VAL_COMPARE_AND_SWAP_4, BUILT_IN_VAL_COMPARE_AND_SWAP_8,
BUILT_IN_LOCK_TEST_AND_SET_N, BUILT_IN_LOCK_TEST_AND_SET_1,
BUILT_IN_LOCK_TEST_AND_SET_2, BUILT_IN_LOCK_TEST_AND_SET_4,
BUILT_IN_LOCK_TEST_AND_SET_8, BUILT_IN_LOCK_RELEASE_N,
BUILT_IN_LOCK_RELEASE_1, BUILT_IN_LOCK_RELEASE_2,
BUILT_IN_LOCK_RELEASE_4, BUILT_IN_LOCK_RELEASE_8,
BUILT_IN_SYNCHRONIZE: New.
* builtins.c (called_as_built_in): Rewrite from CALLED_AS_BUILT_IN
as a function. Accept __sync_ as a prefix as well.
(expand_builtin_sync_operation, expand_builtin_compare_and_swap,
expand_builtin_lock_test_and_set, expand_builtin_synchronize,
expand_builtin_lock_release): New.
(expand_builtin): Call them.
* c-common.c (DEF_BUILTIN): Don't require __builtin_ prefix if
neither BOTH_P nor FALLBACK_P are defined.
(builtin_type_for_size): New.
(sync_resolve_size, sync_resolve_params, sync_resolve_return): New.
(resolve_overloaded_builtin): New.
* c-common.h (resolve_overloaded_builtin): Declare.
(builtin_type_for_size): Declare.
* c-typeck.c (build_function_call): Invoke resolve_overloaded_builtin.
* expr.c (sync_add_optab, sync_sub_optab, sync_ior_optab,
sync_and_optab, sync_xor_optab, sync_nand_optab, sync_old_add_optab,
sync_old_sub_optab, sync_old_ior_optab, sync_old_and_optab,
sync_old_xor_optab, sync_old_nand_optab, sync_new_add_optab,
sync_new_sub_optab, sync_new_ior_optab, sync_new_and_optab,
sync_new_xor_optab, sync_new_nand_optab, sync_compare_and_swap,
sync_compare_and_swap_cc, sync_lock_test_and_set,
sync_lock_release): New.
* optabs.h: Declare them.
* expr.h (expand_val_compare_and_swap, expand_bool_compare_and_swap,
expand_sync_operation, expand_sync_fetch_operation,
expand_sync_lock_test_and_set): Declare.
* genopinit.c (optabs): Add sync optabs.
* optabs.c (init_optabs): Initialize sync optabs.
(expand_val_compare_and_swap_1, expand_val_compare_and_swap,
expand_bool_compare_and_swap, expand_compare_and_swap_loop,
expand_sync_operation, expand_sync_fetch_operation,
expand_sync_lock_test_and_set): New.
* doc/extend.texi (Atomic Builtins): New section
* doc/md.texi (Standard Names): Add sync patterns.
2005-04-14 Alexandre Oliva <aoliva@redhat.com>
* tree-eh.c (lower_try_finally_copy): Generate new code in
......
......@@ -60,6 +60,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
the type pointed to. */
DEF_PRIMITIVE_TYPE (BT_VOID, void_type_node)
DEF_PRIMITIVE_TYPE (BT_BOOL, boolean_type_node)
DEF_PRIMITIVE_TYPE (BT_INT, integer_type_node)
DEF_PRIMITIVE_TYPE (BT_UINT, unsigned_type_node)
DEF_PRIMITIVE_TYPE (BT_LONG, long_integer_type_node)
......@@ -79,6 +80,10 @@ DEF_PRIMITIVE_TYPE (BT_COMPLEX_LONGDOUBLE, complex_long_double_type_node)
DEF_PRIMITIVE_TYPE (BT_PTR, ptr_type_node)
DEF_PRIMITIVE_TYPE (BT_FILEPTR, fileptr_type_node)
DEF_PRIMITIVE_TYPE (BT_CONST_PTR, const_ptr_type_node)
DEF_PRIMITIVE_TYPE (BT_VOLATILE_PTR,
build_pointer_type
(build_qualified_type (void_type_node,
TYPE_QUAL_VOLATILE)))
DEF_PRIMITIVE_TYPE (BT_PTRMODE, (*lang_hooks.types.type_for_mode)(ptr_mode, 0))
DEF_PRIMITIVE_TYPE (BT_INT_PTR, integer_ptr_type_node)
DEF_PRIMITIVE_TYPE (BT_FLOAT_PTR, float_ptr_type_node)
......@@ -94,6 +99,11 @@ DEF_PRIMITIVE_TYPE (BT_CONST_STRING, const_string_type_node)
DEF_PRIMITIVE_TYPE (BT_VALIST_REF, va_list_ref_type_node)
DEF_PRIMITIVE_TYPE (BT_VALIST_ARG, va_list_arg_type_node)
DEF_PRIMITIVE_TYPE (BT_I1, builtin_type_for_size (BITS_PER_UNIT*1, 1))
DEF_PRIMITIVE_TYPE (BT_I2, builtin_type_for_size (BITS_PER_UNIT*2, 1))
DEF_PRIMITIVE_TYPE (BT_I4, builtin_type_for_size (BITS_PER_UNIT*4, 1))
DEF_PRIMITIVE_TYPE (BT_I8, builtin_type_for_size (BITS_PER_UNIT*8, 1))
DEF_POINTER_TYPE (BT_PTR_CONST_STRING, BT_CONST_STRING)
DEF_FUNCTION_TYPE_0 (BT_FN_VOID, BT_VOID)
......@@ -160,6 +170,7 @@ DEF_FUNCTION_TYPE_1 (BT_FN_STRING_CONST_STRING, BT_STRING, BT_CONST_STRING)
DEF_FUNCTION_TYPE_1 (BT_FN_WORD_PTR, BT_WORD, BT_PTR)
DEF_FUNCTION_TYPE_1 (BT_FN_INT_WINT, BT_INT, BT_WINT)
DEF_FUNCTION_TYPE_1 (BT_FN_WINT_WINT, BT_WINT, BT_WINT)
DEF_FUNCTION_TYPE_1 (BT_FN_VOID_VPTR, BT_VOID, BT_VOLATILE_PTR)
DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_INT, BT_VOID, BT_PTR, BT_INT)
DEF_FUNCTION_TYPE_2 (BT_FN_STRING_STRING_CONST_STRING,
......@@ -241,6 +252,10 @@ DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOU
DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_PTR, BT_VOID, BT_PTR, BT_PTR)
DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_PTR_CONST_STRING,
BT_INT, BT_CONST_STRING, BT_PTR_CONST_STRING)
DEF_FUNCTION_TYPE_2 (BT_FN_I1_VPTR_I1, BT_I1, BT_VOLATILE_PTR, BT_I1)
DEF_FUNCTION_TYPE_2 (BT_FN_I2_VPTR_I2, BT_I2, BT_VOLATILE_PTR, BT_I2)
DEF_FUNCTION_TYPE_2 (BT_FN_I4_VPTR_I4, BT_I4, BT_VOLATILE_PTR, BT_I4)
DEF_FUNCTION_TYPE_2 (BT_FN_I8_VPTR_I8, BT_I8, BT_VOLATILE_PTR, BT_I8)
DEF_FUNCTION_TYPE_3 (BT_FN_STRING_STRING_CONST_STRING_SIZE,
BT_STRING, BT_STRING, BT_CONST_STRING, BT_SIZE)
......@@ -285,6 +300,18 @@ DEF_FUNCTION_TYPE_3 (BT_FN_VOID_LONGDOUBLE_LONGDOUBLEPTR_LONGDOUBLEPTR,
DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_PTR_PTR, BT_VOID, BT_PTR, BT_PTR, BT_PTR)
DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_STRING_PTR_CONST_STRING_PTR_CONST_STRING,
BT_INT, BT_CONST_STRING, BT_PTR_CONST_STRING, BT_PTR_CONST_STRING)
DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_VPTR_I1_I1, BT_BOOL, BT_VOLATILE_PTR,
BT_I1, BT_I1)
DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_VPTR_I2_I2, BT_BOOL, BT_VOLATILE_PTR,
BT_I2, BT_I2)
DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_VPTR_I4_I4, BT_BOOL, BT_VOLATILE_PTR,
BT_I4, BT_I4)
DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_VPTR_I8_I8, BT_BOOL, BT_VOLATILE_PTR,
BT_I8, BT_I8)
DEF_FUNCTION_TYPE_3 (BT_FN_I1_VPTR_I1_I1, BT_I1, BT_VOLATILE_PTR, BT_I1, BT_I1)
DEF_FUNCTION_TYPE_3 (BT_FN_I2_VPTR_I2_I2, BT_I2, BT_VOLATILE_PTR, BT_I2, BT_I2)
DEF_FUNCTION_TYPE_3 (BT_FN_I4_VPTR_I4_I4, BT_I4, BT_VOLATILE_PTR, BT_I4, BT_I4)
DEF_FUNCTION_TYPE_3 (BT_FN_I8_VPTR_I8_I8, BT_I8, BT_VOLATILE_PTR, BT_I8, BT_I8)
DEF_FUNCTION_TYPE_4 (BT_FN_SIZE_CONST_PTR_SIZE_SIZE_FILEPTR,
BT_SIZE, BT_CONST_PTR, BT_SIZE, BT_SIZE, BT_FILEPTR)
......
......@@ -3242,7 +3242,8 @@ c_common_nodes_and_builtins (void)
{ \
tree decl; \
\
gcc_assert (!strncmp (NAME, "__builtin_", \
gcc_assert ((!BOTH_P && !FALLBACK_P) \
|| !strncmp (NAME, "__builtin_", \
strlen ("__builtin_"))); \
\
if (!BOTH_P) \
......@@ -5836,4 +5837,170 @@ complete_array_type (tree *ptype, tree initial_value, bool do_default)
return failure;
}
/* Used to help initialize the builtin-types.def table.  When a type of
   the correct size doesn't exist, use error_mark_node instead of NULL.
   The latter results in segfaults even when a decl using the type doesn't
   get invoked.  */

tree
builtin_type_for_size (int size, bool unsignedp)
{
  tree type;

  /* Ask the language front end for an integral type of the requested
     bit width and signedness.  */
  type = lang_hooks.types.type_for_size (size, unsignedp);
  if (type)
    return type;

  /* No such type on this target; substitute error_mark_node so a decl
     built from this type fails gracefully instead of crashing.  */
  return error_mark_node;
}
/* A helper function for resolve_overloaded_builtin in resolving the
overloaded __sync_ builtins.  Returns a positive power of 2 if the
first operand of PARAMS is a pointer to a supported data type.
Returns 0 if an error is encountered.  */

static int
sync_resolve_size (tree function, tree params)
{
tree type;
int size;

/* The __sync_ builtins require at least the pointer argument.  */
if (params == NULL)
{
error ("too few arguments to function %qE", function);
return 0;
}

/* The first argument must have pointer type ...  */
type = TREE_TYPE (TREE_VALUE (params));
if (TREE_CODE (type) != POINTER_TYPE)
goto incompatible;

/* ... and must point to an integral scalar or pointer object.  */
type = TREE_TYPE (type);
if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
goto incompatible;

/* Only the sizes for which size-specific __sync_*_{1,2,4,8}
builtins exist are accepted.  */
size = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
if (size == 1 || size == 2 || size == 4 || size == 8)
return size;

incompatible:
error ("incompatible type for argument %d of %qE", 1, function);
return 0;
}
/* A helper function for resolve_overloaded_builtin.  Adds casts to
   PARAMS to make arguments match up with those of FUNCTION.  Drops
   the variadic arguments at the end.  Returns false if some error
   was encountered; true on success.

   ORIG_FUNCTION is the user-visible overloaded builtin (used only for
   diagnostics); FUNCTION is the size-specific implementation builtin
   whose prototype drives the conversions.  PARAMS is modified in
   place.  */

static bool
sync_resolve_params (tree orig_function, tree function, tree params)
{
  tree arg_types = TYPE_ARG_TYPES (TREE_TYPE (function));
  tree ptype;

  /* We've declared the implementation functions to use "volatile void *"
     as the pointer parameter, so we shouldn't get any complaints from the
     call to check_function_arguments what ever type the user used.  */
  arg_types = TREE_CHAIN (arg_types);
  ptype = TREE_TYPE (TREE_TYPE (TREE_VALUE (params)));

  /* For the rest of the values, we need to cast these to FTYPE, so that we
     don't get warnings for passing pointer types, etc.  */
  while (arg_types != void_list_node)
    {
      tree val;

      params = TREE_CHAIN (params);
      if (params == NULL)
	{
	  error ("too few arguments to function %qE", orig_function);
	  return false;
	}

      /* ??? Ideally for the first conversion we'd use convert_for_assignment
	 so that we get warnings for anything that doesn't match the pointer
	 type.  This isn't portable across the C and C++ front ends atm.  */
      val = TREE_VALUE (params);
      val = convert (ptype, val);
      val = convert (TREE_VALUE (arg_types), val);
      TREE_VALUE (params) = val;

      arg_types = TREE_CHAIN (arg_types);
    }

  /* The definition of these primitives is variadic, with the remaining
     being "an optional list of variables protected by the memory barrier".
     No clue what that's supposed to mean, precisely, but we consider all
     call-clobbered variables to be protected so we're safe.  */
  TREE_CHAIN (params) = NULL;

  return true;
}
/* A helper function for resolve_overloaded_builtin.  Adds a cast to
RESULT to make it match the type of the first pointer argument in
PARAMS.  */

static tree
sync_resolve_return (tree params, tree result)
{
/* The first element of PARAMS is the user's pointer argument, so the
type it points to is what the caller expects the builtin to yield.  */
tree ptype = TREE_TYPE (TREE_TYPE (TREE_VALUE (params)));
return convert (ptype, result);
}
/* Some builtin functions are placeholders for other expressions.  This
function should be called immediately after parsing the call expression
before surrounding code has committed to the type of the expression.

FUNCTION is the DECL that has been invoked; it is known to be a builtin.
PARAMS is the argument list for the call.  The return value is non-null
when expansion is complete, and null if normal processing should
continue.  */

tree
resolve_overloaded_builtin (tree function, tree params)
{
enum built_in_function orig_code = DECL_FUNCTION_CODE (function);
switch (orig_code)
{
/* Each overloaded _N builtin is resolved to one of its size-specific
_1/_2/_4/_8 siblings based on the type the first argument points to.  */
case BUILT_IN_FETCH_AND_ADD_N:
case BUILT_IN_FETCH_AND_SUB_N:
case BUILT_IN_FETCH_AND_OR_N:
case BUILT_IN_FETCH_AND_AND_N:
case BUILT_IN_FETCH_AND_XOR_N:
case BUILT_IN_FETCH_AND_NAND_N:
case BUILT_IN_ADD_AND_FETCH_N:
case BUILT_IN_SUB_AND_FETCH_N:
case BUILT_IN_OR_AND_FETCH_N:
case BUILT_IN_AND_AND_FETCH_N:
case BUILT_IN_XOR_AND_FETCH_N:
case BUILT_IN_NAND_AND_FETCH_N:
case BUILT_IN_BOOL_COMPARE_AND_SWAP_N:
case BUILT_IN_VAL_COMPARE_AND_SWAP_N:
case BUILT_IN_LOCK_TEST_AND_SET_N:
case BUILT_IN_LOCK_RELEASE_N:
{
int n = sync_resolve_size (function, params);
tree new_function, result;

if (n == 0)
return error_mark_node;

/* Relies on the builtins.def layout: the _1, _2, _4 and _8
variants immediately follow their _N entry, so exact_log2 (n) + 1
offsets from ORIG_CODE to the right sized builtin.  */
new_function = built_in_decls[orig_code + exact_log2 (n) + 1];
if (!sync_resolve_params (function, new_function, params))
return error_mark_node;

result = build_function_call (new_function, params);
/* __sync_bool_compare_and_swap returns bool and __sync_lock_release
returns void, so only the remaining builtins need their result cast
back to the user's pointed-to type.  */
if (orig_code != BUILT_IN_BOOL_COMPARE_AND_SWAP_N
&& orig_code != BUILT_IN_LOCK_RELEASE_N)
result = sync_resolve_return (params, result);

return result;
}

default:
return NULL;
}
}
#include "gt-c-common.h"
......@@ -798,6 +798,8 @@ extern void c_do_switch_warnings (splay_tree, location_t, tree, tree);
extern tree build_function_call (tree, tree);
extern tree resolve_overloaded_builtin (tree, tree);
extern tree finish_label_address_expr (tree);
/* Same function prototype, but the C and C++ front ends have
......@@ -860,6 +862,8 @@ extern void lvalue_error (enum lvalue_use);
extern int complete_array_type (tree *, tree, bool);
extern tree builtin_type_for_size (int, bool);
/* In c-gimplify.c */
extern void c_genericize (tree);
extern int c_gimplify_expr (tree *, tree *, tree *);
......
......@@ -1978,6 +1978,13 @@ build_function_call (tree function, tree params)
/* Convert anything with function type to a pointer-to-function. */
if (TREE_CODE (function) == FUNCTION_DECL)
{
if (DECL_BUILT_IN_CLASS (function) == BUILT_IN_NORMAL)
{
tem = resolve_overloaded_builtin (function, params);
if (tem)
return tem;
}
name = DECL_NAME (function);
/* Differs from default_conversion by not setting TREE_ADDRESSABLE
......
......@@ -70,6 +70,7 @@ extensions, accepted by GCC in C89 mode and in C++.
* Return Address:: Getting the return or frame address of a function.
* Vector Extensions:: Using vector instructions through built-in functions.
* Offsetof:: Special syntax for implementing @code{offsetof}.
* Atomic Builtins:: Built-in functions for atomic memory access.
* Other Builtins:: Other built-in functions.
* Target Builtins:: Built-in functions specific to particular targets.
* Target Format Checks:: Format checks specific to particular targets.
......@@ -4581,6 +4582,133 @@ is a suitable definition of the @code{offsetof} macro. In C++, @var{type}
may be dependent. In either case, @var{member} may consist of a single
identifier, or a sequence of member accesses and array references.
@node Atomic Builtins
@section Built-in functions for atomic memory access
The following builtins are intended to be compatible with those described
in the @cite{Intel Itanium Processor-specific Application Binary Interface},
section 7.4. As such, they depart from the normal GCC practice of using
the ``__builtin_'' prefix, and further that they are overloaded such that
they work on multiple types.
The definition given in the Intel documentation allows only for the use of
the types @code{int}, @code{long}, @code{long long} as well as their unsigned
counterparts. GCC will allow any integral scalar or pointer type that is
1, 2, 4 or 8 bytes in length.
Not all operations are supported by all target processors. If a particular
operation cannot be implemented on the target processor, a warning will be
generated and a call to an external function will be generated. The external
function will carry the same name as the builtin, with an additional suffix
@samp{_@var{n}} where @var{n} is the size of the data type.
@c ??? Should we have a mechanism to suppress this warning? This is almost
@c useful for implementing the operation under the control of an external
@c mutex.
In most cases, these builtins are considered a @dfn{full barrier}. That is,
no memory operand will be moved across the operation, either forward or
backward. Further, instructions will be issued as necessary to prevent the
processor from speculating loads across the operation and from queuing stores
after the operation.
All of the routines are described in the Intel documentation to take
``an optional list of variables protected by the memory barrier''. It's
not clear what is meant by that; it could mean that @emph{only} the
following variables are protected, or it could mean that these variables
should in addition be protected. At present GCC ignores this list and
protects all variables which are globally accessible. If in the future
we make some use of this list, an empty list will continue to mean all
globally accessible variables.
@table @code
@item @var{type} __sync_fetch_and_add (@var{type} *ptr, @var{type} value, ...)
@itemx @var{type} __sync_fetch_and_sub (@var{type} *ptr, @var{type} value, ...)
@itemx @var{type} __sync_fetch_and_or (@var{type} *ptr, @var{type} value, ...)
@itemx @var{type} __sync_fetch_and_and (@var{type} *ptr, @var{type} value, ...)
@itemx @var{type} __sync_fetch_and_xor (@var{type} *ptr, @var{type} value, ...)
@itemx @var{type} __sync_fetch_and_nand (@var{type} *ptr, @var{type} value, ...)
@findex __sync_fetch_and_add
@findex __sync_fetch_and_sub
@findex __sync_fetch_and_or
@findex __sync_fetch_and_and
@findex __sync_fetch_and_xor
@findex __sync_fetch_and_nand
These builtins perform the operation suggested by the name, and
returns the value that had previously been in memory. That is,
@smallexample
@{ tmp = *ptr; *ptr @var{op}= value; return tmp; @}
@end smallexample
The builtin @code{__sync_fetch_and_nand} could be implemented by
@code{__sync_fetch_and_and(ptr, ~value)}.
@item @var{type} __sync_add_and_fetch (@var{type} *ptr, @var{type} value, ...)
@itemx @var{type} __sync_sub_and_fetch (@var{type} *ptr, @var{type} value, ...)
@itemx @var{type} __sync_or_and_fetch (@var{type} *ptr, @var{type} value, ...)
@itemx @var{type} __sync_and_and_fetch (@var{type} *ptr, @var{type} value, ...)
@itemx @var{type} __sync_xor_and_fetch (@var{type} *ptr, @var{type} value, ...)
@itemx @var{type} __sync_nand_and_fetch (@var{type} *ptr, @var{type} value, ...)
@findex __sync_add_and_fetch
@findex __sync_sub_and_fetch
@findex __sync_or_and_fetch
@findex __sync_and_and_fetch
@findex __sync_xor_and_fetch
@findex __sync_nand_and_fetch
These builtins perform the operation suggested by the name, and
return the new value. That is,
@smallexample
@{ *ptr @var{op}= value; return *ptr; @}
@end smallexample
@item bool __sync_bool_compare_and_swap (@var{type} *ptr, @var{type} oldval, @var{type} newval, ...)
@itemx @var{type} __sync_val_compare_and_swap (@var{type} *ptr, @var{type} oldval, @var{type} newval, ...)
@findex __sync_bool_compare_and_swap
@findex __sync_val_compare_and_swap
These builtins perform an atomic compare and swap. That is, if the current
value of @code{*@var{ptr}} is @var{oldval}, then write @var{newval} into
@code{*@var{ptr}}.
The ``bool'' version returns true if the comparison is successful and
@var{newval} was written. The ``val'' version returns the contents
of @code{*@var{ptr}} after the operation.
@item __sync_synchronize (...)
@findex __sync_synchronize
This builtin issues a full memory barrier.
@item @var{type} __sync_lock_test_and_set (@var{type} *ptr, @var{type} value, ...)
@findex __sync_lock_test_and_set
This builtin, as described by Intel, is not a traditional test-and-set
operation, but rather an atomic exchange operation. It writes @var{value}
into @code{*@var{ptr}}, and returns the previous contents of
@code{*@var{ptr}}.
Many targets have only minimal support for such locks, and do not support
a full exchange operation. In this case, a target may support reduced
functionality here by which the @emph{only} valid value to store is the
immediate constant 1. The exact value actually stored in @code{*@var{ptr}}
is implementation defined.
This builtin is not a full barrier, but rather an @dfn{acquire barrier}.
This means that references after the builtin cannot move to (or be
speculated to) before the builtin, but previous memory stores may not
be globally visible yet, and previous memory loads may not yet be
satisfied.
@item void __sync_lock_release (@var{type} *ptr, ...)
@findex __sync_lock_release
This builtin releases the lock acquired by @code{__sync_lock_test_and_set}.
Normally this means writing the constant 0 to @code{*@var{ptr}}.
This builtin is not a full barrier, but rather a @dfn{release barrier}.
This means that all previous memory stores are globally visible, and all
previous memory loads have been satisfied, but following memory reads
are not prevented from being speculated to before the barrier.
@end table
@node Other Builtins
@section Other built-in functions provided by GCC
@cindex built-in functions
......
......@@ -3936,6 +3936,140 @@ respectively, a low or moderate degree of temporal locality.
Targets that do not support write prefetches or locality hints can ignore
the values of operands 1 and 2.
@cindex @code{memory_barrier} instruction pattern
@item @samp{memory_barrier}
If the target memory model is not fully synchronous, then this pattern
should be defined to an instruction that orders both loads and stores
before the instruction with respect to loads and stores after the instruction.
This pattern has no operands.
@cindex @code{sync_compare_and_swap@var{mode}} instruction pattern
@item @samp{sync_compare_and_swap@var{mode}}
This pattern, if defined, emits code for an atomic compare-and-swap
operation. Operand 1 is the memory on which the atomic operation is
performed. Operand 2 is the ``old'' value to be compared against the
current contents of the memory location. Operand 3 is the ``new'' value
to store in the memory if the compare succeeds. Operand 0 is the result
of the operation; it should contain the current contents of the memory
after the operation. If the compare succeeds, this should obviously be
a copy of operand 3.
This pattern must show that both operand 0 and operand 1 are modified.
This pattern must issue any memory barrier instructions such that the
pattern as a whole acts as a full barrier.
@cindex @code{sync_compare_and_swap_cc@var{mode}} instruction pattern
@item @samp{sync_compare_and_swap_cc@var{mode}}
This pattern is just like @code{sync_compare_and_swap@var{mode}}, except
it should act as if the compare part of the compare-and-swap were issued via
@code{cmp@var{m}}. This comparison will only be used with @code{EQ} and
@code{NE} branches and @code{setcc} operations.
Some targets do expose the success or failure of the compare-and-swap
operation via the status flags. Ideally we wouldn't need a separate
named pattern in order to take advantage of this, but the combine pass
does not handle patterns with multiple sets, which is required by
definition for @code{sync_compare_and_swap@var{mode}}.
@cindex @code{sync_add@var{mode}} instruction pattern
@cindex @code{sync_sub@var{mode}} instruction pattern
@cindex @code{sync_ior@var{mode}} instruction pattern
@cindex @code{sync_and@var{mode}} instruction pattern
@cindex @code{sync_xor@var{mode}} instruction pattern
@cindex @code{sync_nand@var{mode}} instruction pattern
@item @samp{sync_add@var{mode}}, @samp{sync_sub@var{mode}}
@itemx @samp{sync_ior@var{mode}}, @samp{sync_and@var{mode}}
@itemx @samp{sync_xor@var{mode}}, @samp{sync_nand@var{mode}}
These patterns emit code for an atomic operation on memory.
Operand 0 is the memory on which the atomic operation is performed.
Operand 1 is the second operand to the binary operator.
The ``nand'' operation is @code{op0 & ~op1}.
This pattern must issue any memory barrier instructions such that the
pattern as a whole acts as a full barrier.
If these patterns are not defined, the operation will be constructed
from a compare-and-swap operation, if defined.
@cindex @code{sync_old_add@var{mode}} instruction pattern
@cindex @code{sync_old_sub@var{mode}} instruction pattern
@cindex @code{sync_old_ior@var{mode}} instruction pattern
@cindex @code{sync_old_and@var{mode}} instruction pattern
@cindex @code{sync_old_xor@var{mode}} instruction pattern
@cindex @code{sync_old_nand@var{mode}} instruction pattern
@item @samp{sync_old_add@var{mode}}, @samp{sync_old_sub@var{mode}}
@itemx @samp{sync_old_ior@var{mode}}, @samp{sync_old_and@var{mode}}
@itemx @samp{sync_old_xor@var{mode}}, @samp{sync_old_nand@var{mode}}
These patterns emit code for an atomic operation on memory,
and return the value that the memory contained before the operation.
Operand 0 is the result value, operand 1 is the memory on which the
atomic operation is performed, and operand 2 is the second operand
to the binary operator.
This pattern must issue any memory barrier instructions such that the
pattern as a whole acts as a full barrier.
If these patterns are not defined, the operation will be constructed
from a compare-and-swap operation, if defined.
@cindex @code{sync_new_add@var{mode}} instruction pattern
@cindex @code{sync_new_sub@var{mode}} instruction pattern
@cindex @code{sync_new_ior@var{mode}} instruction pattern
@cindex @code{sync_new_and@var{mode}} instruction pattern
@cindex @code{sync_new_xor@var{mode}} instruction pattern
@cindex @code{sync_new_nand@var{mode}} instruction pattern
@item @samp{sync_new_add@var{mode}}, @samp{sync_new_sub@var{mode}}
@itemx @samp{sync_new_ior@var{mode}}, @samp{sync_new_and@var{mode}}
@itemx @samp{sync_new_xor@var{mode}}, @samp{sync_new_nand@var{mode}}
These patterns are like their @code{sync_old_@var{op}} counterparts,
except that they return the value that exists in the memory location
after the operation, rather than before the operation.
@cindex @code{sync_lock_test_and_set@var{mode}} instruction pattern
@item @samp{sync_lock_test_and_set@var{mode}}
This pattern takes two forms, based on the capabilities of the target.
In either case, operand 0 is the result of the operand, operand 1 is
the memory on which the atomic operation is performed, and operand 2
is the value to set in the lock.
In the ideal case, this operation is an atomic exchange operation, in
which the previous value in memory operand is copied into the result
operand, and the value operand is stored in the memory operand.
For less capable targets, any value operand that is not the constant 1
should be rejected with @code{FAIL}. In this case the target may use
an atomic test-and-set bit operation. The result operand should contain
1 if the bit was previously set and 0 if the bit was previously clear.
The true contents of the memory operand are implementation defined.
This pattern must issue any memory barrier instructions such that the
pattern as a whole acts as an acquire barrier.
If this pattern is not defined, the operation will be constructed from
a compare-and-swap operation, if defined.
@cindex @code{sync_lock_release@var{mode}} instruction pattern
@item @samp{sync_lock_release@var{mode}}
This pattern, if defined, releases a lock set by
@code{sync_lock_test_and_set@var{mode}}. Operand 0 is the memory
that contains the lock.
This pattern must issue any memory barrier instructions such that the
pattern as a whole acts as a release barrier.
If this pattern is not defined, then a @code{memory_barrier} pattern
will be emitted, followed by a store of zero to the memory operand.
@end table
@end ifset
......
......@@ -208,6 +208,30 @@ enum insn_code clrmem_optab[NUM_MACHINE_MODES];
enum insn_code cmpstr_optab[NUM_MACHINE_MODES];
enum insn_code cmpmem_optab[NUM_MACHINE_MODES];
/* Synchronization primitives, one insn_code entry per machine mode.  */

/* Atomic operations for which we don't care about the resulting value.  */
enum insn_code sync_add_optab[NUM_MACHINE_MODES];
enum insn_code sync_sub_optab[NUM_MACHINE_MODES];
enum insn_code sync_ior_optab[NUM_MACHINE_MODES];
enum insn_code sync_and_optab[NUM_MACHINE_MODES];
enum insn_code sync_xor_optab[NUM_MACHINE_MODES];
enum insn_code sync_nand_optab[NUM_MACHINE_MODES];
/* Atomic operations that return the value in memory before the operation.  */
enum insn_code sync_old_add_optab[NUM_MACHINE_MODES];
enum insn_code sync_old_sub_optab[NUM_MACHINE_MODES];
enum insn_code sync_old_ior_optab[NUM_MACHINE_MODES];
enum insn_code sync_old_and_optab[NUM_MACHINE_MODES];
enum insn_code sync_old_xor_optab[NUM_MACHINE_MODES];
enum insn_code sync_old_nand_optab[NUM_MACHINE_MODES];
/* Atomic operations that return the value in memory after the operation.  */
enum insn_code sync_new_add_optab[NUM_MACHINE_MODES];
enum insn_code sync_new_sub_optab[NUM_MACHINE_MODES];
enum insn_code sync_new_ior_optab[NUM_MACHINE_MODES];
enum insn_code sync_new_and_optab[NUM_MACHINE_MODES];
enum insn_code sync_new_xor_optab[NUM_MACHINE_MODES];
enum insn_code sync_new_nand_optab[NUM_MACHINE_MODES];
/* Atomic compare and swap.  */
enum insn_code sync_compare_and_swap[NUM_MACHINE_MODES];
enum insn_code sync_compare_and_swap_cc[NUM_MACHINE_MODES];
/* Atomic exchange with acquire semantics.  */
enum insn_code sync_lock_test_and_set[NUM_MACHINE_MODES];
/* Atomic lock release (store with release semantics).  */
enum insn_code sync_lock_release[NUM_MACHINE_MODES];
/* SLOW_UNALIGNED_ACCESS is nonzero if unaligned accesses are very slow. */
#ifndef SLOW_UNALIGNED_ACCESS
......
......@@ -310,6 +310,11 @@ int can_conditionally_move_p (enum machine_mode mode);
rtx emit_conditional_add (rtx, enum rtx_code, rtx, rtx, enum machine_mode,
rtx, rtx, enum machine_mode, int);

/* Expand a compare-and-swap returning the prior memory value.  */
rtx expand_val_compare_and_swap (rtx, rtx, rtx, rtx);
/* Expand a compare-and-swap returning a success/failure boolean.  */
rtx expand_bool_compare_and_swap (rtx, rtx, rtx, rtx);
/* Expand an atomic operation whose result is ignored.  */
rtx expand_sync_operation (rtx, rtx, enum rtx_code);
/* Expand an atomic operation returning the old or new memory value.  */
rtx expand_sync_fetch_operation (rtx, rtx, enum rtx_code, bool, rtx);
/* Expand an atomic exchange with acquire semantics.  */
rtx expand_sync_lock_test_and_set (rtx, rtx, rtx);
/* Functions from expmed.c: */
......
......@@ -171,12 +171,35 @@ static const char * const optabs[] =
"clrmem_optab[$A] = CODE_FOR_$(clrmem$a$)",
"cmpstr_optab[$A] = CODE_FOR_$(cmpstr$a$)",
"cmpmem_optab[$A] = CODE_FOR_$(cmpmem$a$)",
"sync_add_optab[$A] = CODE_FOR_$(sync_add$I$a$)",
"sync_sub_optab[$A] = CODE_FOR_$(sync_sub$I$a$)",
"sync_ior_optab[$A] = CODE_FOR_$(sync_ior$I$a$)",
"sync_and_optab[$A] = CODE_FOR_$(sync_and$I$a$)",
"sync_xor_optab[$A] = CODE_FOR_$(sync_xor$I$a$)",
"sync_nand_optab[$A] = CODE_FOR_$(sync_nand$I$a$)",
"sync_old_add_optab[$A] = CODE_FOR_$(sync_old_add$I$a$)",
"sync_old_sub_optab[$A] = CODE_FOR_$(sync_old_sub$I$a$)",
"sync_old_ior_optab[$A] = CODE_FOR_$(sync_old_ior$I$a$)",
"sync_old_and_optab[$A] = CODE_FOR_$(sync_old_and$I$a$)",
"sync_old_xor_optab[$A] = CODE_FOR_$(sync_old_xor$I$a$)",
"sync_old_nand_optab[$A] = CODE_FOR_$(sync_old_nand$I$a$)",
"sync_new_add_optab[$A] = CODE_FOR_$(sync_new_add$I$a$)",
"sync_new_sub_optab[$A] = CODE_FOR_$(sync_new_sub$I$a$)",
"sync_new_ior_optab[$A] = CODE_FOR_$(sync_new_ior$I$a$)",
"sync_new_and_optab[$A] = CODE_FOR_$(sync_new_and$I$a$)",
"sync_new_xor_optab[$A] = CODE_FOR_$(sync_new_xor$I$a$)",
"sync_new_nand_optab[$A] = CODE_FOR_$(sync_new_nand$I$a$)",
"sync_compare_and_swap[$A] = CODE_FOR_$(sync_compare_and_swap$I$a$)",
"sync_compare_and_swap_cc[$A] = CODE_FOR_$(sync_compare_and_swap_cc$I$a$)",
"sync_lock_test_and_set[$A] = CODE_FOR_$(sync_lock_test_and_set$I$a$)",
"sync_lock_release[$A] = CODE_FOR_$(sync_lock_release$I$a$)",
"vec_set_optab->handlers[$A].insn_code = CODE_FOR_$(vec_set$a$)",
"vec_extract_optab->handlers[$A].insn_code = CODE_FOR_$(vec_extract$a$)",
"vec_init_optab->handlers[$A].insn_code = CODE_FOR_$(vec_init$a$)",
"vec_realign_load_optab->handlers[$A].insn_code = CODE_FOR_$(vec_realign_load_$a$)",
"vcond_gen_code[$A] = CODE_FOR_$(vcond$a$)",
"vcondu_gen_code[$A] = CODE_FOR_$(vcondu$a$)"
};
static void gen_insn (rtx);
......
......@@ -432,6 +432,43 @@ extern enum insn_code clrmem_optab[NUM_MACHINE_MODES];
extern enum insn_code cmpstr_optab[NUM_MACHINE_MODES];
extern enum insn_code cmpmem_optab[NUM_MACHINE_MODES];
/* Synchronization primitives.  This first set is atomic operations for
   which we don't care about the resulting value.  */
extern enum insn_code sync_add_optab[NUM_MACHINE_MODES];
extern enum insn_code sync_sub_optab[NUM_MACHINE_MODES];
extern enum insn_code sync_ior_optab[NUM_MACHINE_MODES];
extern enum insn_code sync_and_optab[NUM_MACHINE_MODES];
extern enum insn_code sync_xor_optab[NUM_MACHINE_MODES];
extern enum insn_code sync_nand_optab[NUM_MACHINE_MODES];
/* This second set is atomic operations in which we return the value
   that existed in memory before the operation.  */
extern enum insn_code sync_old_add_optab[NUM_MACHINE_MODES];
extern enum insn_code sync_old_sub_optab[NUM_MACHINE_MODES];
extern enum insn_code sync_old_ior_optab[NUM_MACHINE_MODES];
extern enum insn_code sync_old_and_optab[NUM_MACHINE_MODES];
extern enum insn_code sync_old_xor_optab[NUM_MACHINE_MODES];
extern enum insn_code sync_old_nand_optab[NUM_MACHINE_MODES];
/* This third set is atomic operations in which we return the value
   that resulted after performing the operation.  */
extern enum insn_code sync_new_add_optab[NUM_MACHINE_MODES];
extern enum insn_code sync_new_sub_optab[NUM_MACHINE_MODES];
extern enum insn_code sync_new_ior_optab[NUM_MACHINE_MODES];
extern enum insn_code sync_new_and_optab[NUM_MACHINE_MODES];
extern enum insn_code sync_new_xor_optab[NUM_MACHINE_MODES];
extern enum insn_code sync_new_nand_optab[NUM_MACHINE_MODES];
/* Atomic compare and swap.  */
extern enum insn_code sync_compare_and_swap[NUM_MACHINE_MODES];
extern enum insn_code sync_compare_and_swap_cc[NUM_MACHINE_MODES];
/* Atomic exchange with acquire semantics.  */
extern enum insn_code sync_lock_test_and_set[NUM_MACHINE_MODES];
/* Atomic clear with release semantics.  */
extern enum insn_code sync_lock_release[NUM_MACHINE_MODES];
/* Define functions given in optabs.c. */
extern rtx expand_ternary_op (enum machine_mode mode, optab ternary_optab,
......
/* Validate that each of the __sync builtins compiles. This won't
necessarily link, since the target might not support the builtin,
so this may result in external library calls. */

/* One global of each integer flavor (signed and unsigned, char through
   long long), used as memory operands by the tests below.  */
signed char sc;
unsigned char uc;
signed short ss;
unsigned short us;
signed int si;
unsigned int ui;
signed long sl;
unsigned long ul;
signed long long sll;
unsigned long long ull;

/* Pointer-typed objects; declared here but not exercised by the tests
   visible in this file.  */
void *vp;
int *ip;
struct S { struct S *next; int x; } *sp;
/* Check that every __sync fetch-and-OP builtin is accepted when its
   result is discarded, for each signed and unsigned integer type from
   char through long long.  The macro expands to exactly the same calls
   in exactly the same order as writing them out longhand.  */
void test_op_ignore (void)
{
#define DISCARD_ALL(FN) \
  (void) FN (&sc, 1); \
  (void) FN (&uc, 1); \
  (void) FN (&ss, 1); \
  (void) FN (&us, 1); \
  (void) FN (&si, 1); \
  (void) FN (&ui, 1); \
  (void) FN (&sl, 1); \
  (void) FN (&ul, 1); \
  (void) FN (&sll, 1); \
  (void) FN (&ull, 1);

  DISCARD_ALL (__sync_fetch_and_add)
  DISCARD_ALL (__sync_fetch_and_sub)
  DISCARD_ALL (__sync_fetch_and_or)
  DISCARD_ALL (__sync_fetch_and_xor)
  DISCARD_ALL (__sync_fetch_and_and)
  DISCARD_ALL (__sync_fetch_and_nand)
#undef DISCARD_ALL
}
/* Check that every __sync fetch-and-OP builtin is accepted when its
   result (the old memory value) is used, for each integer type.  Each
   macro expansion produces the same assignments, in the same order, as
   the longhand form.  */
void test_fetch_and_op (void)
{
#define FETCH_OP_ALL(FN) \
  sc = FN (&sc, 11); \
  uc = FN (&uc, 11); \
  ss = FN (&ss, 11); \
  us = FN (&us, 11); \
  si = FN (&si, 11); \
  ui = FN (&ui, 11); \
  sl = FN (&sl, 11); \
  ul = FN (&ul, 11); \
  sll = FN (&sll, 11); \
  ull = FN (&ull, 11);

  FETCH_OP_ALL (__sync_fetch_and_add)
  FETCH_OP_ALL (__sync_fetch_and_sub)
  FETCH_OP_ALL (__sync_fetch_and_or)
  FETCH_OP_ALL (__sync_fetch_and_xor)
  FETCH_OP_ALL (__sync_fetch_and_and)
  FETCH_OP_ALL (__sync_fetch_and_nand)
#undef FETCH_OP_ALL
}
/* Check that every __sync OP-and-fetch builtin is accepted when its
   result (the new memory value) is used, for each integer type.  The
   second operand is the global `uc', which is itself updated partway
   through each group, so the macro is written to keep the statement
   order identical to the longhand form.  */
void test_op_and_fetch (void)
{
#define OP_FETCH_ALL(FN) \
  sc = FN (&sc, uc); \
  uc = FN (&uc, uc); \
  ss = FN (&ss, uc); \
  us = FN (&us, uc); \
  si = FN (&si, uc); \
  ui = FN (&ui, uc); \
  sl = FN (&sl, uc); \
  ul = FN (&ul, uc); \
  sll = FN (&sll, uc); \
  ull = FN (&ull, uc);

  OP_FETCH_ALL (__sync_add_and_fetch)
  OP_FETCH_ALL (__sync_sub_and_fetch)
  OP_FETCH_ALL (__sync_or_and_fetch)
  OP_FETCH_ALL (__sync_xor_and_fetch)
  OP_FETCH_ALL (__sync_and_and_fetch)
  OP_FETCH_ALL (__sync_nand_and_fetch)
#undef OP_FETCH_ALL
}
/* Check both compare-and-swap flavors for every integer type:
   __sync_val_compare_and_swap returns the prior memory contents,
   __sync_bool_compare_and_swap returns whether the swap occurred.
   Macro expansion preserves the exact statement order (note that `sc'
   and `uc' are updated by the first two VAL_CAS lines and then read by
   the remaining calls).  */
void test_compare_and_swap (void)
{
#define VAL_CAS(V) V = __sync_val_compare_and_swap (&V, uc, sc);
  VAL_CAS (sc) VAL_CAS (uc) VAL_CAS (ss) VAL_CAS (us) VAL_CAS (si)
  VAL_CAS (ui) VAL_CAS (sl) VAL_CAS (ul) VAL_CAS (sll) VAL_CAS (ull)
#undef VAL_CAS

#define BOOL_CAS(V) ui = __sync_bool_compare_and_swap (&V, uc, sc);
  BOOL_CAS (sc) BOOL_CAS (uc) BOOL_CAS (ss) BOOL_CAS (us) BOOL_CAS (si)
  BOOL_CAS (ui) BOOL_CAS (sl) BOOL_CAS (ul) BOOL_CAS (sll) BOOL_CAS (ull)
#undef BOOL_CAS
}
/* Check the lock primitives for every integer type: acquire via
   __sync_lock_test_and_set, a full barrier via __sync_synchronize,
   then release via __sync_lock_release.  Macro expansion yields the
   same calls in the same order as the longhand form.  */
void test_lock (void)
{
#define ACQUIRE(V) V = __sync_lock_test_and_set (&V, 1);
  ACQUIRE (sc) ACQUIRE (uc) ACQUIRE (ss) ACQUIRE (us) ACQUIRE (si)
  ACQUIRE (ui) ACQUIRE (sl) ACQUIRE (ul) ACQUIRE (sll) ACQUIRE (ull)
#undef ACQUIRE

  __sync_synchronize ();

#define RELEASE(V) __sync_lock_release (&V);
  RELEASE (sc) RELEASE (uc) RELEASE (ss) RELEASE (us) RELEASE (si)
  RELEASE (ui) RELEASE (sl) RELEASE (ul) RELEASE (sll) RELEASE (ull)
#undef RELEASE
}
/* Validate that the __sync builtins are overloaded properly. */
/* { dg-do compile } */
/* { dg-options "-Werror" } */
#define TEST1(TYPE, BUILTIN) \
void t_##TYPE##BUILTIN(TYPE *p) \
{ \
__typeof(BUILTIN(p, 1)) *pp; \
pp = p; \
}
#define TEST2(BUILTIN) \
TEST1(int, BUILTIN) \
TEST1(long, BUILTIN)
TEST2(__sync_fetch_and_add)
TEST2(__sync_fetch_and_sub)
TEST2(__sync_fetch_and_or)
TEST2(__sync_fetch_and_and)
TEST2(__sync_fetch_and_xor)
TEST2(__sync_fetch_and_nand)
TEST2(__sync_add_and_fetch)
TEST2(__sync_sub_and_fetch)
TEST2(__sync_or_and_fetch)
TEST2(__sync_and_and_fetch)
TEST2(__sync_xor_and_fetch)
TEST2(__sync_nand_and_fetch)
TEST2(__sync_lock_test_and_set)
#define TEST3(TYPE) \
void t_##TYPE##__sync_val_compare_and_swap(TYPE *p) \
{ \
__typeof(__sync_val_compare_and_swap(p, 1, 2)) *pp; \
pp = p; \
}
TEST3(int)
TEST3(long)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment