Commit 99400eed, authored and committed by Ulrich Weigand

spu-protos.h (spu_expand_atomic_op): Add prototype.

gcc/
	* config/spu/spu-protos.h (spu_expand_atomic_op): Add prototype.
	* config/spu/spu.c (spu_expand_atomic_op): New function.
	* config/spu/spu.md (AINT): New mode iterator.
	(ATOMIC): New code iterator.
	(atomic_name, atomic_pred): New code attributes.
	("atomic_load<mode>", "atomic_store<mode>"): New expanders.
	("atomic_compare_and_swap<mode>", "atomic_exchange<mode>"): Likewise.
	(""atomic_<atomic_name><mode>", "atomic_fetch_<atomic_name><mode>",
	"atomic_<atomic_name>_fetch<mode>"): Likewise.

gcc/testsuite/
	* lib/target-supports.exp (check_effective_target_sync_int_128):
	Return 1 on spu-*-* targets.
	(check_effective_target_sync_int_128_runtime): Likewise.
	(check_effective_target_sync_long_long): Likewise.
	(check_effective_target_sync_long_long_runtime): Likewise.
	(check_effective_target_sync_int_long): Likewise.
	(check_effective_target_sync_char_short): Likewise.

From-SVN: r228297
2015-09-30 Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
	* config/spu/spu-protos.h (spu_expand_atomic_op): Add prototype.
	* config/spu/spu.c (spu_expand_atomic_op): New function.
	* config/spu/spu.md (AINT): New mode iterator.
	(ATOMIC): New code iterator.
	(atomic_name, atomic_pred): New code attributes.
	("atomic_load<mode>", "atomic_store<mode>"): New expanders.
	("atomic_compare_and_swap<mode>", "atomic_exchange<mode>"): Likewise.
	("atomic_<atomic_name><mode>", "atomic_fetch_<atomic_name><mode>",
	"atomic_<atomic_name>_fetch<mode>"): Likewise.
2015-09-30 Ilya Enkovich <enkovich.gnu@gmail.com>
	* config/i386/i386.c (scalar_chain::analyze_register_chain): Ignore
gcc/config/spu/spu-protos.h
@@ -76,6 +76,8 @@ extern void spu_builtin_promote (rtx ops[]);
extern void spu_expand_sign_extend (rtx ops[]);
extern void spu_expand_vector_init (rtx target, rtx vals);
extern rtx spu_legitimize_reload_address (rtx, machine_mode, int, int);
extern void spu_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
				  rtx orig_before, rtx orig_after);
#endif /* RTX_CODE */
extern void spu_init_expanders (void);
gcc/config/spu/spu.c
@@ -7121,6 +7121,41 @@ spu_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
      *code = (int)swap_condition ((enum rtx_code)*code);
    }
}

/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  */

void
spu_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
		      rtx orig_before, rtx orig_after)
{
  machine_mode mode = GET_MODE (mem);
  rtx before = orig_before, after = orig_after;

  if (before == NULL_RTX)
    before = gen_reg_rtx (mode);

  emit_move_insn (before, mem);

  if (code == MULT)  /* NAND operation */
    {
      rtx x = expand_simple_binop (mode, AND, before, val,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
      after = expand_simple_unop (mode, NOT, x, after, 1);
    }
  else
    {
      after = expand_simple_binop (mode, code, before, val,
				   after, 1, OPTAB_LIB_WIDEN);
    }

  emit_move_insn (mem, after);

  if (orig_after && after != orig_after)
    emit_move_insn (orig_after, after);
}
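
(For illustration only, not part of the patch: written as ordinary C for a
single int-sized operand, the helper's data flow is sketched below.  MULT is
used internally as the stand-in rtx code for NAND, since RTL has no dedicated
NAND code; the function and parameter names in the sketch are made up.)

/* Sketch of what spu_expand_atomic_op computes; is_nand corresponds to
   code == MULT in the real function.  */
static void
atomic_rmw_sketch (int is_nand, int *mem, int val, int *before, int *after)
{
  *before = *mem;               /* old value of MEM */
  if (is_nand)
    *after = ~(*before & val);  /* NAND: NOT of the AND */
  else
    *after = *before + val;     /* e.g. PLUS; the other codes are analogous */
  *mem = *after;                /* store the new value back */
}

The BEFORE and AFTER outputs are what the atomic_fetch_<op> and
atomic_<op>_fetch expanders, respectively, hand back to the caller.
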
/* Table of machine attributes. */
static const struct attribute_spec spu_attribute_table[] =
gcc/config/spu/spu.md
@@ -5097,3 +5097,150 @@ DONE;
   (set_attr "type" "multi1")]
)

; Atomic operations
;
; SPU execution is always single-threaded, so there is no need for real
; atomic operations.  We provide the atomic primitives anyway so that
; code expecting the builtins to be present (like libgfortran) will work.

;; Types that we should provide atomic instructions for.
(define_mode_iterator AINT [QI HI SI DI TI])

(define_code_iterator ATOMIC [plus minus ior xor and mult])
(define_code_attr atomic_name
  [(plus "add") (minus "sub")
   (ior "or") (xor "xor") (and "and") (mult "nand")])
(define_code_attr atomic_pred
  [(plus "spu_arith_operand") (minus "spu_reg_operand")
   (ior "spu_logical_operand") (xor "spu_logical_operand")
   (and "spu_logical_operand") (mult "spu_logical_operand")])

(define_expand "atomic_load<mode>"
  [(set (match_operand:AINT 0 "spu_reg_operand" "")	;; output
	(match_operand:AINT 1 "memory_operand" ""))	;; memory
   (use (match_operand:SI 2 "const_int_operand" ""))]	;; model
  ""
{
  if (MEM_ADDR_SPACE (operands[1]))
    FAIL;

  emit_move_insn (operands[0], operands[1]);
  DONE;
})

(define_expand "atomic_store<mode>"
  [(set (match_operand:AINT 0 "memory_operand" "")	;; memory
	(match_operand:AINT 1 "spu_reg_operand" ""))	;; input
   (use (match_operand:SI 2 "const_int_operand" ""))]	;; model
  ""
{
  if (MEM_ADDR_SPACE (operands[0]))
    FAIL;

  emit_move_insn (operands[0], operands[1]);
  DONE;
})

(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:SI 0 "spu_reg_operand" "")		;; bool out
   (match_operand:AINT 1 "spu_reg_operand" "")		;; val out
   (match_operand:AINT 2 "memory_operand" "")		;; memory
   (match_operand:AINT 3 "spu_nonmem_operand" "")	;; expected
   (match_operand:AINT 4 "spu_nonmem_operand" "")	;; desired
   (match_operand:SI 5 "const_int_operand" "")		;; is_weak
   (match_operand:SI 6 "const_int_operand" "")		;; model succ
   (match_operand:SI 7 "const_int_operand" "")]		;; model fail
  ""
{
  rtx boolval, retval, label;

  if (MEM_ADDR_SPACE (operands[2]))
    FAIL;

  boolval = gen_reg_rtx (SImode);
  retval = gen_reg_rtx (<MODE>mode);
  label = gen_label_rtx ();

  emit_move_insn (retval, operands[2]);
  emit_move_insn (boolval, const0_rtx);

  emit_cmp_and_jump_insns (retval, operands[3], NE, NULL_RTX,
			   <MODE>mode, 1, label);

  emit_move_insn (operands[2], operands[4]);
  emit_move_insn (boolval, const1_rtx);

  emit_label (label);

  emit_move_insn (operands[0], boolval);
  emit_move_insn (operands[1], retval);
  DONE;
})
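
(For illustration only, not part of the patch: the compare-and-swap expander
emits a plain load/compare/branch/store sequence, which is sufficient because
SPU execution is single-threaded.  In C terms it behaves like the sketch
below; the names are hypothetical.)

/* Sketch of the sequence atomic_compare_and_swap<mode> expands to.  */
static int
cas_sketch (int *mem, int expected, int desired, int *oldval)
{
  int retval = *mem;         /* load the current value */
  int boolval = 0;
  if (retval == expected)    /* mirrors the emit_cmp_and_jump_insns branch */
    {
      *mem = desired;        /* store the new value only on success */
      boolval = 1;
    }
  *oldval = retval;          /* the old value is returned unconditionally */
  return boolval;            /* success flag */
}
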
(define_expand "atomic_exchange<mode>"
[(match_operand:AINT 0 "spu_reg_operand" "") ;; output
(match_operand:AINT 1 "memory_operand" "") ;; memory
(match_operand:AINT 2 "spu_nonmem_operand" "") ;; input
(match_operand:SI 3 "const_int_operand" "")] ;; model
""
{
rtx retval;
if (MEM_ADDR_SPACE (operands[1]))
FAIL;
retval = gen_reg_rtx (<MODE>mode);
emit_move_insn (retval, operands[1]);
emit_move_insn (operands[1], operands[2]);
emit_move_insn (operands[0], retval);
DONE;
})
(define_expand "atomic_<atomic_name><mode>"
[(ATOMIC:AINT
(match_operand:AINT 0 "memory_operand" "") ;; memory
(match_operand:AINT 1 "<atomic_pred>" "")) ;; operand
(match_operand:SI 2 "const_int_operand" "")] ;; model
""
{
if (MEM_ADDR_SPACE (operands[0]))
FAIL;
spu_expand_atomic_op (<CODE>, operands[0], operands[1],
NULL_RTX, NULL_RTX);
DONE;
})
(define_expand "atomic_fetch_<atomic_name><mode>"
[(match_operand:AINT 0 "spu_reg_operand" "") ;; output
(ATOMIC:AINT
(match_operand:AINT 1 "memory_operand" "") ;; memory
(match_operand:AINT 2 "<atomic_pred>" "")) ;; operand
(match_operand:SI 3 "const_int_operand" "")] ;; model
""
{
if (MEM_ADDR_SPACE (operands[1]))
FAIL;
spu_expand_atomic_op (<CODE>, operands[1], operands[2],
operands[0], NULL_RTX);
DONE;
})
(define_expand "atomic_<atomic_name>_fetch<mode>"
[(match_operand:AINT 0 "spu_reg_operand" "") ;; output
(ATOMIC:AINT
(match_operand:AINT 1 "memory_operand" "") ;; memory
(match_operand:AINT 2 "<atomic_pred>" "")) ;; operand
(match_operand:SI 3 "const_int_operand" "")] ;; model
""
{
if (MEM_ADDR_SPACE (operands[1]))
FAIL;
spu_expand_atomic_op (<CODE>, operands[1], operands[2],
NULL_RTX, operands[0]);
DONE;
})
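
(For illustration only, not part of the patch: the three operation expanders
differ only in which value, if any, is handed back to the caller, which is why
they pass operands[0] as either the orig_before or the orig_after argument of
spu_expand_atomic_op.  At the source level this is the usual distinction
between the __atomic_fetch_<op> and __atomic_<op>_fetch builtins:)

/* atomic_fetch_<op>: the result is the value BEFORE the operation.  */
int
old_then_add (int *p, int v)
{
  return __atomic_fetch_add (p, v, __ATOMIC_SEQ_CST);
}

/* atomic_<op>_fetch: the result is the value AFTER the operation.  */
int
add_then_new (int *p, int v)
{
  return __atomic_add_fetch (p, v, __ATOMIC_SEQ_CST);
}

/* atomic_<op>: no result is needed at all.  */
void
add_no_result (int *p, int v)
{
  (void) __atomic_fetch_add (p, v, __ATOMIC_RELAXED);
}
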
2015-09-30 Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
	* lib/target-supports.exp (check_effective_target_sync_int_128):
	Return 1 on spu-*-* targets.
	(check_effective_target_sync_int_128_runtime): Likewise.
	(check_effective_target_sync_long_long): Likewise.
	(check_effective_target_sync_long_long_runtime): Likewise.
	(check_effective_target_sync_int_long): Likewise.
	(check_effective_target_sync_char_short): Likewise.
2015-09-30 Ilya Enkovich <enkovich.gnu@gmail.com>
	* gcc.target/i386/pr67761.c: New test.
gcc/testsuite/lib/target-supports.exp
@@ -5092,6 +5092,8 @@ proc check_effective_target_sync_int_128 { } {
    if { ([istarget x86_64-*-*] || [istarget i?86-*-*])
	 && ![is-effective-target ia32] } {
	return 1
    } elseif { [istarget spu-*-*] } {
	return 1
    } else {
	return 0
    }
@@ -5115,6 +5117,8 @@ proc check_effective_target_sync_int_128_runtime { } {
		}
	    } ""
	}]
    } elseif { [istarget spu-*-*] } {
	return 1
    } else {
	return 0
    }
@@ -5129,7 +5133,8 @@ proc check_effective_target_sync_long_long { } {
	 || [istarget aarch64*-*-*]
	 || [istarget arm*-*-*]
	 || [istarget alpha*-*-*]
	 || ([istarget sparc*-*-*] && [check_effective_target_lp64])
	 || [istarget spu-*-*] } {
	return 1
    } else {
	return 0
@@ -5179,6 +5184,8 @@ proc check_effective_target_sync_long_long_runtime { } {
	     && [check_effective_target_lp64]
	     && [check_effective_target_ultrasparc_hw]) } {
	return 1
    } elseif { [istarget spu-*-*] } {
	return 1
    } elseif { [istarget powerpc*-*-*] && [check_effective_target_lp64] } {
	return 1
    } else {
@@ -5292,6 +5299,7 @@ proc check_effective_target_sync_int_long { } {
	     || [istarget powerpc*-*-*]
	     || [istarget crisv32-*-*] || [istarget cris-*-*]
	     || ([istarget sparc*-*-*] && [check_effective_target_sparc_v9])
	     || [istarget spu-*-*]
	     || [check_effective_target_mips_llsc] } {
	    set et_sync_int_long_saved 1
	}
@@ -5322,6 +5330,7 @@ proc check_effective_target_sync_char_short { } {
	     || [istarget powerpc*-*-*]
	     || [istarget crisv32-*-*] || [istarget cris-*-*]
	     || ([istarget sparc*-*-*] && [check_effective_target_sparc_v9])
	     || [istarget spu-*-*]
	     || [check_effective_target_mips_llsc] } {
	    set et_sync_char_short_saved 1
	}
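
(For illustration only, not part of the patch: with these effective-target
keywords now returning 1 on spu-*-*, existing tests guarded like the
hypothetical example below start to run on SPU.)

/* { dg-do compile } */
/* { dg-require-effective-target sync_int_128 } */

__int128 counter;

void
bump (__int128 n)
{
  __atomic_fetch_add (&counter, n, __ATOMIC_SEQ_CST);
}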