Commit ab876106 authored by Matthew Wahab, committed by Matthew Wahab

re PR target/65697 (__atomic memory barriers not strong enough for __sync builtins)

	PR target/65697
	* config/aarch64/aarch64.c (aarch64_split_compare_and_swap): Check
	for __sync memory models, emit initial loads and final barriers as
	appropriate.

From-SVN: r223984
parent f70fb3b6
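
For background, PR target/65697 is about the legacy __sync builtins, which are documented to act as full memory barriers, being expanded on AArch64 through the __atomic paths with only acquire/release ordering. A small, hypothetical C illustration of the reordering the final barrier is meant to prevent (this example is not taken from the PR, only meant to show the kind of hoisting at stake):

/* Hypothetical illustration: __sync_fetch_and_add is documented as a
   full barrier, so the later store to `flag' must not become visible
   before the atomic update of `counter'.  With only acquire/release
   semantics on the exclusive-access sequence, the store could in
   principle be hoisted past it on AArch64; the final barrier emitted
   for __sync models closes that gap.  */
long counter;
int flag;

void
update (void)
{
  __sync_fetch_and_add (&counter, 1);
  flag = 1;
}
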
2015-06-01  Matthew Wahab  <matthew.wahab@arm.com>

	PR target/65697
	* config/aarch64/aarch64.c (aarch64_split_compare_and_swap): Check
	for __sync memory models, emit initial loads and final barriers as
	appropriate.

2015-06-01  Matthew Wahab  <matthew.wahab@arm.com>

	PR target/65697
	* config/aarch64/aarch64.c (aarch64_emit_post_barrier): New.
	(aarch64_split_atomic_op): Check for __sync memory models, emit
	...
@@ -9436,14 +9436,18 @@ aarch64_split_compare_and_swap (rtx operands[])
   bool is_weak;
   rtx_code_label *label1, *label2;
   rtx x, cond;
+  enum memmodel model;
+  rtx model_rtx;
 
   rval = operands[0];
   mem = operands[1];
   oldval = operands[2];
   newval = operands[3];
   is_weak = (operands[4] != const0_rtx);
+  model_rtx = operands[5];
   scratch = operands[7];
   mode = GET_MODE (mem);
+  model = memmodel_from_int (INTVAL (model_rtx));
 
   label1 = NULL;
   if (!is_weak)
@@ -9453,7 +9457,13 @@ aarch64_split_compare_and_swap (rtx operands[])
     }
   label2 = gen_label_rtx ();
 
-  aarch64_emit_load_exclusive (mode, rval, mem, operands[5]);
+  /* The initial load can be relaxed for a __sync operation since a final
+     barrier will be emitted to stop code hoisting.  */
+  if (is_mm_sync (model))
+    aarch64_emit_load_exclusive (mode, rval, mem,
+				 GEN_INT (MEMMODEL_RELAXED));
+  else
+    aarch64_emit_load_exclusive (mode, rval, mem, model_rtx);
 
   cond = aarch64_gen_compare_reg (NE, rval, oldval);
   x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
@@ -9461,7 +9471,7 @@ aarch64_split_compare_and_swap (rtx operands[])
 			    gen_rtx_LABEL_REF (Pmode, label2), pc_rtx);
   aarch64_emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
 
-  aarch64_emit_store_exclusive (mode, scratch, mem, newval, operands[5]);
+  aarch64_emit_store_exclusive (mode, scratch, mem, newval, model_rtx);
 
   if (!is_weak)
     {
@@ -9478,6 +9488,10 @@ aarch64_split_compare_and_swap (rtx operands[])
     }
 
   emit_label (label2);
+
+  /* Emit any final barrier needed for a __sync operation.  */
+  if (is_mm_sync (model))
+    aarch64_emit_post_barrier (model);
 }
 
 /* Split an atomic operation.  */
...
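
The final barrier mentioned in the new comment is emitted by aarch64_emit_post_barrier, a helper added for the atomic-operation splitter in the parent commit (f70fb3b6). Its body is not shown in this diff; the following is only a sketch of what such a helper would do, assuming it follows the pattern used by the rest of this patch series:

/* Sketch only (assumption, not the code from the parent commit): after a
   __sync operation whose model includes acquire semantics, emit a trailing
   sequentially consistent fence so that later memory accesses cannot be
   hoisted above the exclusive-access loop.  */
static void
aarch64_emit_post_barrier (enum memmodel model)
{
  const enum memmodel base_model = memmodel_base (model);

  if (is_mm_sync (model)
      && (base_model == MEMMODEL_ACQUIRE
	  || base_model == MEMMODEL_ACQ_REL
	  || base_model == MEMMODEL_SEQ_CST))
    {
      emit_insn (gen_mem_thread_fence (GEN_INT (MEMMODEL_SEQ_CST)));
    }
}

On AArch64 a sequentially consistent thread fence expands to a dmb ish instruction, which is what actually stops later accesses from being reordered past the store-exclusive of the compare-and-swap loop.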