Commit 0100c5f9 authored by Richard Sandiford, committed by Richard Sandiford

[AArch64] Move code around

This patch simply moves code around, in order to make the later
patches easier to read, and to avoid forward declarations.
It doesn't add the missing function comments because the interfaces
will change in a later patch.

2017-11-01  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/
	* config/aarch64/aarch64.c (aarch64_add_constant_internal)
	(aarch64_add_constant, aarch64_add_sp, aarch64_sub_sp): Move
	earlier in file.

Reviewed-by: James Greenhalgh <james.greenhalgh@arm.com>

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>

From-SVN: r254325
parent 3f8334a5
@@ -2,6 +2,14 @@
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

	* config/aarch64/aarch64.c (aarch64_add_constant_internal)
	(aarch64_add_constant, aarch64_add_sp, aarch64_sub_sp): Move
	earlier in file.

2017-11-01  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

	* config/aarch64/aarch64.c (aarch64_evpc_trn, aarch64_evpc_uzp)
	(aarch64_evpc_zip, aarch64_evpc_ext, aarch64_evpc_rev)
	(aarch64_evpc_dup): Generate rtl directly, rather than using
@@ -1964,6 +1964,87 @@ aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
  return num_insns;
}

/* Add DELTA to REGNUM in mode MODE.  SCRATCHREG can be used to hold a
   temporary value if necessary.  FRAME_RELATED_P should be true if
   the RTX_FRAME_RELATED flag should be set and CFA adjustments added
   to the generated instructions.  If SCRATCHREG is known to hold
   abs (delta), EMIT_MOVE_IMM can be set to false to avoid emitting the
   immediate again.

   Since this function may be used to adjust the stack pointer, we must
   ensure that it cannot cause transient stack deallocation (for example
   by first incrementing SP and then decrementing when adjusting by a
   large immediate).  */

static void
aarch64_add_constant_internal (scalar_int_mode mode, int regnum,
			       int scratchreg, HOST_WIDE_INT delta,
			       bool frame_related_p, bool emit_move_imm)
{
  HOST_WIDE_INT mdelta = abs_hwi (delta);
  rtx this_rtx = gen_rtx_REG (mode, regnum);
  rtx_insn *insn;

  if (!mdelta)
    return;

  /* Single instruction adjustment.  */
  if (aarch64_uimm12_shift (mdelta))
    {
      insn = emit_insn (gen_add2_insn (this_rtx, GEN_INT (delta)));
      RTX_FRAME_RELATED_P (insn) = frame_related_p;
      return;
    }

  /* Emit 2 additions/subtractions if the adjustment is less than 24 bits.
     Only do this if mdelta is not a 16-bit move as adjusting using a move
     is better.  */
  if (mdelta < 0x1000000 && !aarch64_move_imm (mdelta, mode))
    {
      HOST_WIDE_INT low_off = mdelta & 0xfff;

      low_off = delta < 0 ? -low_off : low_off;
      insn = emit_insn (gen_add2_insn (this_rtx, GEN_INT (low_off)));
      RTX_FRAME_RELATED_P (insn) = frame_related_p;
      insn = emit_insn (gen_add2_insn (this_rtx, GEN_INT (delta - low_off)));
      RTX_FRAME_RELATED_P (insn) = frame_related_p;
      return;
    }

  /* Emit a move immediate if required and an addition/subtraction.  */
  rtx scratch_rtx = gen_rtx_REG (mode, scratchreg);
  if (emit_move_imm)
    aarch64_internal_mov_immediate (scratch_rtx, GEN_INT (mdelta), true, mode);
  insn = emit_insn (delta < 0 ? gen_sub2_insn (this_rtx, scratch_rtx)
		    : gen_add2_insn (this_rtx, scratch_rtx));
  if (frame_related_p)
    {
      RTX_FRAME_RELATED_P (insn) = frame_related_p;
      rtx adj = plus_constant (mode, this_rtx, delta);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, gen_rtx_SET (this_rtx, adj));
    }
}

static inline void
aarch64_add_constant (scalar_int_mode mode, int regnum, int scratchreg,
		      HOST_WIDE_INT delta)
{
  aarch64_add_constant_internal (mode, regnum, scratchreg, delta, false, true);
}

static inline void
aarch64_add_sp (int scratchreg, HOST_WIDE_INT delta, bool emit_move_imm)
{
  aarch64_add_constant_internal (Pmode, SP_REGNUM, scratchreg, delta,
				 true, emit_move_imm);
}

static inline void
aarch64_sub_sp (int scratchreg, HOST_WIDE_INT delta, bool frame_related_p)
{
  aarch64_add_constant_internal (Pmode, SP_REGNUM, scratchreg, -delta,
				 frame_related_p, true);
}

void
aarch64_expand_mov_immediate (rtx dest, rtx imm)
@@ -2075,88 +2156,6 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
				  as_a <scalar_int_mode> (mode));
}

/* Add DELTA to REGNUM in mode MODE.  SCRATCHREG can be used to hold a
   temporary value if necessary.  FRAME_RELATED_P should be true if
   the RTX_FRAME_RELATED flag should be set and CFA adjustments added
   to the generated instructions.  If SCRATCHREG is known to hold
   abs (delta), EMIT_MOVE_IMM can be set to false to avoid emitting the
   immediate again.

   Since this function may be used to adjust the stack pointer, we must
   ensure that it cannot cause transient stack deallocation (for example
   by first incrementing SP and then decrementing when adjusting by a
   large immediate).  */

static void
aarch64_add_constant_internal (scalar_int_mode mode, int regnum,
			       int scratchreg, HOST_WIDE_INT delta,
			       bool frame_related_p, bool emit_move_imm)
{
  HOST_WIDE_INT mdelta = abs_hwi (delta);
  rtx this_rtx = gen_rtx_REG (mode, regnum);
  rtx_insn *insn;

  if (!mdelta)
    return;

  /* Single instruction adjustment.  */
  if (aarch64_uimm12_shift (mdelta))
    {
      insn = emit_insn (gen_add2_insn (this_rtx, GEN_INT (delta)));
      RTX_FRAME_RELATED_P (insn) = frame_related_p;
      return;
    }

  /* Emit 2 additions/subtractions if the adjustment is less than 24 bits.
     Only do this if mdelta is not a 16-bit move as adjusting using a move
     is better.  */
  if (mdelta < 0x1000000 && !aarch64_move_imm (mdelta, mode))
    {
      HOST_WIDE_INT low_off = mdelta & 0xfff;

      low_off = delta < 0 ? -low_off : low_off;
      insn = emit_insn (gen_add2_insn (this_rtx, GEN_INT (low_off)));
      RTX_FRAME_RELATED_P (insn) = frame_related_p;
      insn = emit_insn (gen_add2_insn (this_rtx, GEN_INT (delta - low_off)));
      RTX_FRAME_RELATED_P (insn) = frame_related_p;
      return;
    }

  /* Emit a move immediate if required and an addition/subtraction.  */
  rtx scratch_rtx = gen_rtx_REG (mode, scratchreg);
  if (emit_move_imm)
    aarch64_internal_mov_immediate (scratch_rtx, GEN_INT (mdelta), true, mode);
  insn = emit_insn (delta < 0 ? gen_sub2_insn (this_rtx, scratch_rtx)
		    : gen_add2_insn (this_rtx, scratch_rtx));
  if (frame_related_p)
    {
      RTX_FRAME_RELATED_P (insn) = frame_related_p;
      rtx adj = plus_constant (mode, this_rtx, delta);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, gen_rtx_SET (this_rtx, adj));
    }
}

static inline void
aarch64_add_constant (scalar_int_mode mode, int regnum, int scratchreg,
		      HOST_WIDE_INT delta)
{
  aarch64_add_constant_internal (mode, regnum, scratchreg, delta, false, true);
}

static inline void
aarch64_add_sp (int scratchreg, HOST_WIDE_INT delta, bool emit_move_imm)
{
  aarch64_add_constant_internal (Pmode, SP_REGNUM, scratchreg, delta,
				 true, emit_move_imm);
}

static inline void
aarch64_sub_sp (int scratchreg, HOST_WIDE_INT delta, bool frame_related_p)
{
  aarch64_add_constant_internal (Pmode, SP_REGNUM, scratchreg, -delta,
				 frame_related_p, true);
}

static bool
aarch64_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
				 tree exp ATTRIBUTE_UNUSED)
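
The part of the moved code worth pausing on is the 24-bit branch of
aarch64_add_constant_internal: the adjustment is split into a low
12-bit piece and a remainder that is a multiple of 4096, and both
pieces take the sign of DELTA, so two successive SP adjustments never
transiently deallocate stack.  Below is a minimal standalone sketch of
just that arithmetic in plain C; it is not GCC code, and the helper
name split_24bit_adjustment is invented for illustration.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustration only (not GCC code): split DELTA the way the 24-bit
   branch of aarch64_add_constant_internal does.  The low part keeps
   the bottom 12 bits and the high part the rest; both take DELTA's
   sign, so applying them one after the other never overshoots the
   final value.  That is the transient-deallocation guarantee from
   the function comment.  */
static void
split_24bit_adjustment (long long delta, long long *low, long long *high)
{
  long long mdelta = llabs (delta);
  assert (mdelta < 0x1000000);	/* The caller has checked the range.  */

  long long low_off = mdelta & 0xfff;
  *low = delta < 0 ? -low_off : low_off;	/* Sign-matched low part.  */
  *high = delta - *low;		/* Multiple of 4096, same sign.  */
}

int
main (void)
{
  long long deltas[] = { 0x12345, -0x12345, 4096, -8192, 0xfff };
  for (size_t i = 0; i < sizeof deltas / sizeof deltas[0]; i++)
    {
      long long low, high;
      split_24bit_adjustment (deltas[i], &low, &high);
      assert (low + high == deltas[i]);	/* The pieces recombine.  */
      assert (low * deltas[i] >= 0);	/* No sign flip in either...  */
      assert (high * deltas[i] >= 0);	/* ...piece of the split.  */
      printf ("delta %lld -> low %lld, high %lld\n",
	      deltas[i], low, high);
    }
  return 0;
}

Running the sketch shows, for example, that a delta of 0x12345 splits
into 0x345 + 0x12000, both positive, matching the two add-immediate
instructions the moved code would emit for that adjustment.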