Commit 28875d67 by Richard Henderson Committed by Richard Henderson

ia64: Update to atomic optabs

        * config/ia64/ia64.c (ia64_expand_atomic_op): Add model parameter.
        Generate the barrier required for the memory model.
        (rtx_needs_barrier): Handle UNSPEC_FETCHADD_REL, UNSPEC_CMPXCHG_REL.
        * config/ia64/ia64-protos.h: Update.
        * config/ia64/ia64.md (UNSPEC_FETCHADD_REL): New.
        (UNSPEC_CMPXCHG_REL): New.
        * config/ia64/sync.md (mem_thread_fence): New.
        (atomic_load<IMODE>, atomic_store<IMODE>): New.
        (atomic_compare_and_swap<IMODE>): New.
        (cmpxchg_acq_<I124MODE>, cmpxchg_acq_di): New.
        (atomic_exchange<IMODE>): New.
        (xchg_acq_<IMODE>): Rename from sync_lock_test_and_set<IMODE>.
        (atomic_<FETCHOP><IMODE>, atomic_nand<IMODE>): New.
        (atomic_fetch_<FETCHOP><IMODE>, atomic_fetch_nand<IMODE>): New.
        (atomic_<FETCHOP>_fetch<IMODE>, atomic_nand_fetch<IMODE>): New.
        (fetchadd_rel_<I48MODE>): New.
        (sync_<FETCHOP><IMODE>, sync_nand<IMODE>): Remove.
        (sync_old_<FETCHOP><IMODE>, sync_old_nand<IMODE>): Remove.
        (sync_new_<FETCHOP><IMODE>, sync_new_nand<IMODE>): Remove.
        (sync_compare_and_swap<IMODE>): Remove.
        (sync_lock_release<IMODE>): Remove.

From-SVN: r181643
parent 127e44c8
2011-11-22 Richard Henderson <rth@redhat.com>
* config/ia64/ia64.c (ia64_expand_atomic_op): Add model parameter.
Generate the barrier required for the memory model.
(rtx_needs_barrier): Handle UNSPEC_FETCHADD_REL, UNSPEC_CMPXCHG_REL.
* config/ia64/ia64-protos.h: Update.
* config/ia64/ia64.md (UNSPEC_FETCHADD_REL): New.
(UNSPEC_CMPXCHG_REL): New.
* config/ia64/sync.md (mem_thread_fence): New.
(atomic_load<IMODE>, atomic_store<IMODE>): New.
(atomic_compare_and_swap<IMODE>): New.
(cmpxchg_acq_<I124MODE>, cmpxchg_acq_di): New.
(atomic_exchange<IMODE>): New.
(xchg_acq_<IMODE>): Rename from sync_lock_test_and_set<IMODE>.
(atomic_<FETCHOP><IMODE>, atomic_nand<IMODE>): New.
(atomic_fetch_<FETCHOP><IMODE>, atomic_fetch_nand<IMODE>): New.
(atomic_<FETCHOP>_fetch<IMODE>, atomic_nand_fetch<IMODE>): New.
(fetchadd_rel_<I48MODE>): New.
(sync_<FETCHOP><IMODE>, sync_nand<IMODE>): Remove.
(sync_old_<FETCHOP><IMODE>, sync_old_nand<IMODE>): Remove.
(sync_new_<FETCHOP><IMODE>, sync_new_nand<IMODE>): Remove.
(sync_compare_and_swap<IMODE>): Remove.
(sync_lock_release<IMODE>): Remove.
* config/ia64/ia64.md: Use define_c_enum for UNSPEC_*
and UNSPECV_* constants.
......@@ -47,7 +47,8 @@ extern void ia64_expand_dot_prod_v8qi (rtx[], bool);
extern void ia64_expand_call (rtx, rtx, rtx, int);
extern void ia64_split_call (rtx, rtx, rtx, rtx, rtx, int, int);
extern void ia64_reload_gp (void);
extern void ia64_expand_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx);
extern void ia64_expand_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx,
enum memmodel);
extern HOST_WIDE_INT ia64_initial_elimination_offset (int, int);
extern void ia64_expand_prologue (void);
......
......@@ -2266,7 +2266,7 @@ ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
void
ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
rtx old_dst, rtx new_dst)
rtx old_dst, rtx new_dst, enum memmodel model)
{
enum machine_mode mode = GET_MODE (mem);
rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
......@@ -2283,12 +2283,31 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
if (!old_dst)
old_dst = gen_reg_rtx (mode);
emit_insn (gen_memory_barrier ());
switch (model)
{
case MEMMODEL_ACQ_REL:
case MEMMODEL_SEQ_CST:
emit_insn (gen_memory_barrier ());
/* FALLTHRU */
case MEMMODEL_RELAXED:
case MEMMODEL_ACQUIRE:
case MEMMODEL_CONSUME:
if (mode == SImode)
icode = CODE_FOR_fetchadd_acq_si;
else
icode = CODE_FOR_fetchadd_acq_di;
break;
case MEMMODEL_RELEASE:
if (mode == SImode)
icode = CODE_FOR_fetchadd_rel_si;
else
icode = CODE_FOR_fetchadd_rel_di;
break;
default:
gcc_unreachable ();
}
if (mode == SImode)
icode = CODE_FOR_fetchadd_acq_si;
else
icode = CODE_FOR_fetchadd_acq_di;
emit_insn (GEN_FCN (icode) (old_dst, mem, val));
if (new_dst)
......@@ -2302,8 +2321,12 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
}
/* Because of the volatile mem read, we get an ld.acq, which is the
front half of the full barrier. The end half is the cmpxchg.rel. */
gcc_assert (MEM_VOLATILE_P (mem));
front half of the full barrier. The end half is the cmpxchg.rel.
For relaxed and release memory models, we don't need this. But we
also don't bother trying to prevent it either. */
gcc_assert (model == MEMMODEL_RELAXED
|| model == MEMMODEL_RELEASE
|| MEM_VOLATILE_P (mem));
old_reg = gen_reg_rtx (DImode);
cmp_reg = gen_reg_rtx (DImode);
......@@ -2342,12 +2365,36 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
if (new_dst)
emit_move_insn (new_dst, new_reg);
switch (mode)
switch (model)
{
case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
case MEMMODEL_RELAXED:
case MEMMODEL_ACQUIRE:
case MEMMODEL_CONSUME:
switch (mode)
{
case QImode: icode = CODE_FOR_cmpxchg_acq_qi; break;
case HImode: icode = CODE_FOR_cmpxchg_acq_hi; break;
case SImode: icode = CODE_FOR_cmpxchg_acq_si; break;
case DImode: icode = CODE_FOR_cmpxchg_acq_di; break;
default:
gcc_unreachable ();
}
break;
case MEMMODEL_RELEASE:
case MEMMODEL_ACQ_REL:
case MEMMODEL_SEQ_CST:
switch (mode)
{
case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
default:
gcc_unreachable ();
}
break;
default:
gcc_unreachable ();
}
......@@ -6342,6 +6389,7 @@ rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
case UNSPEC_PIC_CALL:
case UNSPEC_MF:
case UNSPEC_FETCHADD_ACQ:
case UNSPEC_FETCHADD_REL:
case UNSPEC_BSP_VALUE:
case UNSPEC_FLUSHRS:
case UNSPEC_BUNDLE_SELECTOR:
......@@ -6385,6 +6433,7 @@ rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
break;
case UNSPEC_CMPXCHG_ACQ:
case UNSPEC_CMPXCHG_REL:
need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
break;
......
......@@ -68,7 +68,9 @@
UNSPEC_PIC_CALL
UNSPEC_MF
UNSPEC_CMPXCHG_ACQ
UNSPEC_CMPXCHG_REL
UNSPEC_FETCHADD_ACQ
UNSPEC_FETCHADD_REL
UNSPEC_BSP_VALUE
UNSPEC_FLUSHRS
UNSPEC_BUNDLE_SELECTOR
......
......@@ -18,6 +18,9 @@
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
;; Conversion to C++11 memory model based on
;; http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
;; Mode iterators used by the atomic patterns below:
;;   IMODE    -- every integer mode the atomic optabs cover;
;;   I124MODE -- sub-doubleword modes, used by the cmpxchg_<acq,rel>
;;               patterns that zero-extend their result into DImode;
;;   I48MODE  -- the two modes the fetchadd patterns support.
(define_mode_iterator IMODE [QI HI SI DI])
(define_mode_iterator I124MODE [QI HI SI])
(define_mode_iterator I48MODE [SI DI])
......@@ -27,6 +30,15 @@
(define_code_attr fetchop_name
[(plus "add") (minus "sub") (ior "ior") (xor "xor") (and "and")])
;; Expand a stand-alone memory fence.  Operand 0 is the C++11 memory
;; model as a const_int; only a seq-cst fence emits an explicit mf here.
;; NOTE(review): acq_rel/acquire/release fences expand to nothing at
;; all -- confirm the acquire/release forms used by the other atomic
;; patterns make that sufficient on ia64.
(define_expand "mem_thread_fence"
[(match_operand:SI 0 "const_int_operand" "")] ;; model
""
{
if (INTVAL (operands[0]) == MEMMODEL_SEQ_CST)
emit_insn (gen_memory_barrier ());
DONE;
})
(define_expand "memory_barrier"
[(set (match_dup 0)
(unspec:BLK [(match_dup 0)] UNSPEC_MF))]
......@@ -43,107 +55,94 @@
"mf"
[(set_attr "itanium_class" "syst_m")])
;; Atomic fetch-and-add with acquire semantics for SI/DI.  Operand 0
;; receives the old memory value, operand 1 is the memory location
;; (post-increment addressing excluded), operand 2 is an immediate
;; addend restricted by fetchadd_operand to the values the ia64
;; fetchadd instruction accepts.
(define_insn "fetchadd_acq_<mode>"
[(set (match_operand:I48MODE 0 "gr_register_operand" "=r")
(match_operand:I48MODE 1 "not_postinc_memory_operand" "+S"))
(set (match_dup 1)
(unspec:I48MODE [(match_dup 1)
(match_operand:I48MODE 2 "fetchadd_operand" "n")]
UNSPEC_FETCHADD_ACQ))]
""
"fetchadd<modesuffix>.acq %0 = %1, %2"
[(set_attr "itanium_class" "sem")])
;; Old-style __sync expander for a read-modify-write with no result
;; value (this commit's ChangeLog removes the sync_* patterns in favor
;; of the atomic_* ones).  Expands through ia64_expand_atomic_op with
;; neither old_dst nor new_dst requested.
(define_expand "sync_<fetchop_name><mode>"
[(set (match_operand:IMODE 0 "memory_operand" "")
(FETCHOP:IMODE (match_dup 0)
(match_operand:IMODE 1 "general_operand" "")))]
""
{
ia64_expand_atomic_op (<CODE>, operands[0], operands[1], NULL, NULL);
DONE;
})
(define_expand "sync_nand<mode>"
[(set (match_operand:IMODE 0 "memory_operand" "")
(not:IMODE
(and:IMODE (match_dup 0)
(match_operand:IMODE 1 "general_operand" ""))))]
(define_expand "atomic_load<mode>"
[(match_operand:IMODE 0 "gr_register_operand" "") ;; output
(match_operand:IMODE 1 "memory_operand" "") ;; memory
(match_operand:SI 2 "const_int_operand" "")] ;; model
""
{
ia64_expand_atomic_op (NOT, operands[0], operands[1], NULL, NULL);
DONE;
})
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
(define_expand "sync_old_<fetchop_name><mode>"
[(set (match_operand:IMODE 0 "gr_register_operand" "")
(FETCHOP:IMODE
(match_operand:IMODE 1 "memory_operand" "")
(match_operand:IMODE 2 "general_operand" "")))]
""
{
ia64_expand_atomic_op (<CODE>, operands[1], operands[2], operands[0], NULL);
/* Unless the memory model is relaxed, we want to emit ld.acq, which
will happen automatically for volatile memories. */
gcc_assert (model == MEMMODEL_RELAXED || MEM_VOLATILE_P (operands[1]));
emit_move_insn (operands[0], operands[1]);
DONE;
})
(define_expand "sync_old_nand<mode>"
[(set (match_operand:IMODE 0 "gr_register_operand" "")
(not:IMODE
(and:IMODE (match_operand:IMODE 1 "memory_operand" "")
(match_operand:IMODE 2 "general_operand" ""))))]
(define_expand "atomic_store<mode>"
[(match_operand:IMODE 0 "memory_operand" "") ;; memory
(match_operand:IMODE 1 "gr_reg_or_0_operand" "") ;; input
(match_operand:SI 2 "const_int_operand" "")] ;; model
""
{
ia64_expand_atomic_op (NOT, operands[1], operands[2], operands[0], NULL);
DONE;
})
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
(define_expand "sync_new_<fetchop_name><mode>"
[(set (match_operand:IMODE 0 "gr_register_operand" "")
(FETCHOP:IMODE
(match_operand:IMODE 1 "memory_operand" "")
(match_operand:IMODE 2 "general_operand" "")))]
""
{
ia64_expand_atomic_op (<CODE>, operands[1], operands[2], NULL, operands[0]);
DONE;
})
/* Unless the memory model is relaxed, we want to emit st.rel, which
will happen automatically for volatile memories. */
gcc_assert (model == MEMMODEL_RELAXED || MEM_VOLATILE_P (operands[0]));
emit_move_insn (operands[0], operands[1]);
(define_expand "sync_new_nand<mode>"
[(set (match_operand:IMODE 0 "gr_register_operand" "")
(not:IMODE
(and:IMODE (match_operand:IMODE 1 "memory_operand" "")
(match_operand:IMODE 2 "general_operand" ""))))]
""
{
ia64_expand_atomic_op (NOT, operands[1], operands[2], NULL, operands[0]);
/* Sequentially consistent stores need a subsequent MF. See
http://www.decadent.org.uk/pipermail/cpp-threads/2008-December/001952.html
for a discussion of why a MF is needed here, but not for atomic_load. */
if (model == MEMMODEL_SEQ_CST)
emit_insn (gen_memory_barrier ());
DONE;
})
(define_expand "sync_compare_and_swap<mode>"
[(match_operand:IMODE 0 "gr_register_operand" "")
(match_operand:IMODE 1 "memory_operand" "")
(match_operand:IMODE 2 "gr_register_operand" "")
(match_operand:IMODE 3 "gr_register_operand" "")]
(define_expand "atomic_compare_and_swap<mode>"
[(match_operand:DI 0 "gr_register_operand" "") ;; bool out
(match_operand:IMODE 1 "gr_register_operand" "") ;; val out
(match_operand:IMODE 2 "not_postinc_memory_operand" "") ;; memory
(match_operand:IMODE 3 "gr_register_operand" "") ;; expected
(match_operand:IMODE 4 "gr_reg_or_0_operand" "") ;; desired
(match_operand:SI 5 "const_int_operand" "") ;; is_weak
(match_operand:SI 6 "const_int_operand" "") ;; succ model
(match_operand:SI 7 "const_int_operand" "")] ;; fail model
""
{
enum memmodel model = (enum memmodel) INTVAL (operands[6]);
rtx ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
rtx dst;
rtx dval, eval;
eval = gen_reg_rtx (DImode);
convert_move (eval, operands[3], 1);
emit_move_insn (ccv, eval);
convert_move (ccv, operands[2], 1);
if (<MODE>mode == DImode)
dval = operands[1];
else
dval = gen_reg_rtx (DImode);
dst = operands[0];
if (GET_MODE (dst) != DImode)
dst = gen_reg_rtx (DImode);
switch (model)
{
case MEMMODEL_RELAXED:
case MEMMODEL_ACQUIRE:
case MEMMODEL_CONSUME:
emit_insn (gen_cmpxchg_acq_<mode> (dval, operands[2], ccv, operands[4]));
break;
case MEMMODEL_RELEASE:
emit_insn (gen_cmpxchg_rel_<mode> (dval, operands[2], ccv, operands[4]));
break;
case MEMMODEL_ACQ_REL:
case MEMMODEL_SEQ_CST:
emit_insn (gen_cmpxchg_rel_<mode> (dval, operands[2], ccv, operands[4]));
emit_insn (gen_memory_barrier ());
break;
default:
gcc_unreachable ();
}
emit_insn (gen_cmpxchg_rel_<mode> (dst, operands[1], ccv, operands[3]));
emit_insn (gen_memory_barrier ());
if (<MODE>mode != DImode)
emit_move_insn (operands[1], gen_lowpart (<MODE>mode, dval));
if (dst != operands[0])
emit_move_insn (operands[0], gen_lowpart (<MODE>mode, dst));
emit_insn (gen_cstoredi4 (operands[0], gen_rtx_EQ (DImode, dval, eval),
dval, eval));
DONE;
})
(define_insn "cmpxchg_rel_<mode>"
(define_insn "cmpxchg_acq_<mode>"
[(set (match_operand:DI 0 "gr_register_operand" "=r")
(zero_extend:DI
(match_operand:I124MODE 1 "not_postinc_memory_operand" "+S")))
......@@ -154,10 +153,24 @@
(match_operand:I124MODE 3 "gr_reg_or_0_operand" "rO")]
UNSPEC_CMPXCHG_ACQ))]
""
"cmpxchg<modesuffix>.acq %0 = %1, %r3, %2"
[(set_attr "itanium_class" "sem")])
;; Compare-and-swap with release semantics for the 1/2/4-byte modes.
;; Operand 0 receives the old memory value zero-extended to DImode,
;; operand 1 is the memory location (no post-increment), operand 2 is
;; the ar.ccv application register holding the compare value, and
;; operand 3 is the replacement value (register or the zero register).
(define_insn "cmpxchg_rel_<mode>"
[(set (match_operand:DI 0 "gr_register_operand" "=r")
(zero_extend:DI
(match_operand:I124MODE 1 "not_postinc_memory_operand" "+S")))
(set (match_dup 1)
(unspec:I124MODE
[(match_dup 1)
(match_operand:DI 2 "ar_ccv_reg_operand" "")
(match_operand:I124MODE 3 "gr_reg_or_0_operand" "rO")]
UNSPEC_CMPXCHG_REL))]
""
"cmpxchg<modesuffix>.rel %0 = %1, %r3, %2"
[(set_attr "itanium_class" "sem")])
(define_insn "cmpxchg_rel_di"
(define_insn "cmpxchg_acq_di"
[(set (match_operand:DI 0 "gr_register_operand" "=r")
(match_operand:DI 1 "not_postinc_memory_operand" "+S"))
(set (match_dup 1)
......@@ -166,10 +179,50 @@
(match_operand:DI 3 "gr_reg_or_0_operand" "rO")]
UNSPEC_CMPXCHG_ACQ))]
""
"cmpxchg8.acq %0 = %1, %r3, %2"
[(set_attr "itanium_class" "sem")])
;; DImode compare-and-swap with release semantics (cmpxchg8.rel).
;; Same operand layout as cmpxchg_rel_<mode>, but no zero-extension is
;; needed since the result is already a doubleword.
(define_insn "cmpxchg_rel_di"
[(set (match_operand:DI 0 "gr_register_operand" "=r")
(match_operand:DI 1 "not_postinc_memory_operand" "+S"))
(set (match_dup 1)
(unspec:DI [(match_dup 1)
(match_operand:DI 2 "ar_ccv_reg_operand" "")
(match_operand:DI 3 "gr_reg_or_0_operand" "rO")]
UNSPEC_CMPXCHG_REL))]
""
"cmpxchg8.rel %0 = %1, %r3, %2"
[(set_attr "itanium_class" "sem")])
(define_insn "sync_lock_test_and_set<mode>"
;; Expand an atomic exchange.  Operand 0 receives the old memory value,
;; operand 1 is the memory, operand 2 the new value, operand 3 the
;; C++11 memory model as a const_int.  The ia64 xchg instruction only
;; provides acquire semantics (see the note on xchg_acq below), so any
;; model that needs release ordering gets a preceding mf.
(define_expand "atomic_exchange<mode>"
[(match_operand:IMODE 0 "gr_register_operand" "") ;; output
(match_operand:IMODE 1 "not_postinc_memory_operand" "") ;; memory
(match_operand:IMODE 2 "gr_reg_or_0_operand" "") ;; input
(match_operand:SI 3 "const_int_operand" "")] ;; succ model
""
{
enum memmodel model = (enum memmodel) INTVAL (operands[3]);
switch (model)
{
case MEMMODEL_RELAXED:
case MEMMODEL_ACQUIRE:
case MEMMODEL_CONSUME:
break;
case MEMMODEL_RELEASE:
case MEMMODEL_ACQ_REL:
case MEMMODEL_SEQ_CST:
/* xchg is acquire-only; emit the release half explicitly.  */
emit_insn (gen_memory_barrier ());
break;
default:
gcc_unreachable ();
}
emit_insn (gen_xchg_acq_<mode> (operands[0], operands[1], operands[2]));
DONE;
})
;; Note that XCHG is always memory model acquire.
(define_insn "xchg_acq_<mode>"
[(set (match_operand:IMODE 0 "gr_register_operand" "=r")
(match_operand:IMODE 1 "not_postinc_memory_operand" "+S"))
(set (match_dup 1)
......@@ -178,10 +231,101 @@
"xchg<modesuffix> %0 = %1, %r2"
[(set_attr "itanium_class" "sem")])
(define_expand "sync_lock_release<mode>"
;; Expand an atomic read-modify-write (<fetchop_name>) with no result
;; value.  Operand 0 is the memory, operand 1 the second operand of the
;; operation, operand 2 the C++11 memory model as a const_int.
;; The diff scrape left a stray line of the removed sync_lock_release
;; pattern ("gr_reg_or_0_operand" for operand 1) inside this template,
;; which duplicated operand 1 and unbalanced the RTL; it is dropped
;; here, matching the parallel atomic_nand expander.
(define_expand "atomic_<fetchop_name><mode>"
[(set (match_operand:IMODE 0 "memory_operand" "")
(FETCHOP:IMODE (match_dup 0)
(match_operand:IMODE 1 "nonmemory_operand" "")))
(use (match_operand:SI 2 "const_int_operand" ""))]
""
{
ia64_expand_atomic_op (<CODE>, operands[0], operands[1], NULL, NULL,
(enum memmodel) INTVAL (operands[2]));
DONE;
})
;; Expand an atomic NAND -- (not (and mem op)), per the RTL template --
;; with no result value.  Operand 0 is the memory, operand 1 the second
;; operand, operand 2 the C++11 memory model.  The NOT rtx code tells
;; ia64_expand_atomic_op to perform the nand operation.
(define_expand "atomic_nand<mode>"
[(set (match_operand:IMODE 0 "memory_operand" "")
(not:IMODE
(and:IMODE (match_dup 0)
(match_operand:IMODE 1 "nonmemory_operand" ""))))
(use (match_operand:SI 2 "const_int_operand" ""))]
""
{
ia64_expand_atomic_op (NOT, operands[0], operands[1], NULL, NULL,
(enum memmodel) INTVAL (operands[2]));
DONE;
})
;; Expand an atomic fetch-and-<fetchop_name>: operand 0 receives the
;; OLD value of the memory (passed as old_dst to ia64_expand_atomic_op).
;; Operand 1 is the memory, operand 2 the second operand, operand 3 the
;; C++11 memory model.
;; The stray "gcc_assert (MEM_VOLATILE_P (operands[0]))" from the diff
;; scrape is removed: operand 0 is a gr_register_operand, so applying a
;; MEM predicate to it is invalid, and no sibling expander has such an
;; assert.
(define_expand "atomic_fetch_<fetchop_name><mode>"
[(set (match_operand:IMODE 0 "gr_register_operand" "")
(FETCHOP:IMODE
(match_operand:IMODE 1 "memory_operand" "")
(match_operand:IMODE 2 "nonmemory_operand" "")))
(use (match_operand:SI 3 "const_int_operand" ""))]
""
{
ia64_expand_atomic_op (<CODE>, operands[1], operands[2], operands[0], NULL,
(enum memmodel) INTVAL (operands[3]));
DONE;
})
;; Expand an atomic fetch-and-nand: operand 0 receives the OLD value of
;; the memory (passed as old_dst to ia64_expand_atomic_op).  Operand 1
;; is the memory, operand 2 the second operand, operand 3 the C++11
;; memory model.
(define_expand "atomic_fetch_nand<mode>"
[(set (match_operand:IMODE 0 "gr_register_operand" "")
(not:IMODE
(and:IMODE (match_operand:IMODE 1 "memory_operand" "")
(match_operand:IMODE 2 "nonmemory_operand" ""))))
(use (match_operand:SI 3 "const_int_operand" ""))]
""
{
ia64_expand_atomic_op (NOT, operands[1], operands[2], operands[0], NULL,
(enum memmodel) INTVAL (operands[3]));
DONE;
})
;; Expand an atomic <fetchop_name>-and-fetch: operand 0 receives the
;; NEW value of the memory (passed as new_dst to ia64_expand_atomic_op,
;; in contrast to the fetch_<fetchop_name> variant above which requests
;; the old value).  Operand 3 is the C++11 memory model.
(define_expand "atomic_<fetchop_name>_fetch<mode>"
[(set (match_operand:IMODE 0 "gr_register_operand" "")
(FETCHOP:IMODE
(match_operand:IMODE 1 "memory_operand" "")
(match_operand:IMODE 2 "nonmemory_operand" "")))
(use (match_operand:SI 3 "const_int_operand" ""))]
""
{
ia64_expand_atomic_op (<CODE>, operands[1], operands[2], NULL, operands[0],
(enum memmodel) INTVAL (operands[3]));
DONE;
})
;; Expand an atomic nand-and-fetch: operand 0 receives the NEW value of
;; the memory (passed as new_dst to ia64_expand_atomic_op).  Operand 3
;; is the C++11 memory model.
(define_expand "atomic_nand_fetch<mode>"
[(set (match_operand:IMODE 0 "gr_register_operand" "")
(not:IMODE
(and:IMODE (match_operand:IMODE 1 "memory_operand" "")
(match_operand:IMODE 2 "nonmemory_operand" ""))))
(use (match_operand:SI 3 "const_int_operand" ""))]
""
{
ia64_expand_atomic_op (NOT, operands[1], operands[2], NULL, operands[0],
(enum memmodel) INTVAL (operands[3]));
DONE;
})
;; Atomic fetch-and-add with acquire semantics for SI/DI.  Operand 0
;; receives the old memory value, operand 1 is the memory location
;; (post-increment addressing excluded), operand 2 is an immediate
;; addend restricted by fetchadd_operand to the values the ia64
;; fetchadd instruction accepts.
(define_insn "fetchadd_acq_<mode>"
[(set (match_operand:I48MODE 0 "gr_register_operand" "=r")
(match_operand:I48MODE 1 "not_postinc_memory_operand" "+S"))
(set (match_dup 1)
(unspec:I48MODE [(match_dup 1)
(match_operand:I48MODE 2 "fetchadd_operand" "n")]
UNSPEC_FETCHADD_ACQ))]
""
"fetchadd<modesuffix>.acq %0 = %1, %2"
[(set_attr "itanium_class" "sem")])
;; Atomic fetch-and-add with release semantics for SI/DI -- the
;; release-ordered counterpart of fetchadd_acq_<mode> above, added by
;; this commit for the MEMMODEL_RELEASE path in ia64_expand_atomic_op.
(define_insn "fetchadd_rel_<mode>"
[(set (match_operand:I48MODE 0 "gr_register_operand" "=r")
(match_operand:I48MODE 1 "not_postinc_memory_operand" "+S"))
(set (match_dup 1)
(unspec:I48MODE [(match_dup 1)
(match_operand:I48MODE 2 "fetchadd_operand" "n")]
UNSPEC_FETCHADD_REL))]
""
"fetchadd<modesuffix>.rel %0 = %1, %2"
[(set_attr "itanium_class" "sem")])
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment