Commit ec18e48e by Vlad Lazar, committed by Jeff Law

expmed.h (canonicalize_comparison): New declaration.

	* expmed.h (canonicalize_comparison): New declaration.
	* expmed.c (canonicalize_comparison, equivalent_cmp_code): New function.
	* expmed.c (emit_store_flag_1): Add call to canonicalize_comparison.
	* optabs.c (prepare_cmp_insn): Likewise.
	* rtl.h (unsigned_condition_p): New function which checks if a
	comparison operator is unsigned.

	* gcc.target/aarch64/imm_choice_comparison.c: New test.

From-SVN: r263591
gcc/ChangeLog
2018-08-16  Vlad Lazar  <vlad.lazar@arm.com>

	* expmed.h (canonicalize_comparison): New declaration.
	* expmed.c (canonicalize_comparison, equivalent_cmp_code): New function.
	* expmed.c (emit_store_flag_1): Add call to canonicalize_comparison.
	* optabs.c (prepare_cmp_insn): Likewise.
	* rtl.h (unsigned_condition_p): New function which checks if a
	comparison operator is unsigned.

2018-08-16  Nathan Sidwell  <nathan@acm.org>

	* config/rs6000/rs6000-c.c (rs6000_macro_to_expand): Use cpp_macro_p.
gcc/expmed.c
@@ -5541,6 +5541,9 @@ emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  if (CONST_SCALAR_INT_P (op1))
    canonicalize_comparison (mode, &code, &op1);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */
@@ -6161,6 +6164,96 @@ emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
  return target;
}
/* Helper function for canonicalize_cmp_for_target.  Swap between inclusive
   and exclusive ranges in order to create an equivalent comparison.  See
   canonicalize_cmp_for_target for the possible cases.  */

static enum rtx_code
equivalent_cmp_code (enum rtx_code code)
{
  switch (code)
    {
    case GT:
      return GE;
    case GE:
      return GT;
    case LT:
      return LE;
    case LE:
      return LT;
    case GTU:
      return GEU;
    case GEU:
      return GTU;
    case LTU:
      return LEU;
    case LEU:
      return LTU;
    default:
      return code;
    }
}
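The swap above is justified by the usual inclusive/exclusive range identities on integers: a > b is the same predicate as a >= b + 1, a <= b the same as a < b + 1, and so on, provided adjusting the bound by one does not overflow. A minimal standalone check of those identities (plain C, not part of the patch):

/* Not GCC code: sanity-check the range equivalences that
   equivalent_cmp_code and canonicalize_comparison rely on, for bounds
   where adding or subtracting 1 cannot overflow.  */
#include <assert.h>

int
main (void)
{
  for (long a = -4; a <= 4; a++)
    for (long b = -3; b <= 3; b++)
      {
	assert ((a > b) == (a >= b + 1));	/* GT  <->  GE  */
	assert ((a <= b) == (a < b + 1));	/* LE  <->  LT  */
	assert ((a >= b) == (a > b - 1));	/* GE  <->  GT  */
	assert ((a < b) == (a <= b - 1));	/* LT  <->  LE  */
      }
  return 0;
}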
/* Choose the more appropriate immediate in scalar integer comparisons.  The
   purpose of this is to end up with an immediate which can be loaded into a
   register in fewer moves, if possible.

   For each integer comparison there exists an equivalent choice:
     i)   a >  b or a >= b + 1
     ii)  a <= b or a <  b + 1
     iii) a >= b or a >  b - 1
     iv)  a <  b or a <= b - 1

   MODE is the mode of the first operand.
   CODE points to the comparison code.
   IMM points to the rtx containing the immediate.  *IMM must satisfy
   CONST_SCALAR_INT_P on entry and continues to satisfy CONST_SCALAR_INT_P
   on exit.  */
void
canonicalize_comparison (machine_mode mode, enum rtx_code *code, rtx *imm)
{
  if (!SCALAR_INT_MODE_P (mode))
    return;

  int to_add = 0;
  enum signop sgn = unsigned_condition_p (*code) ? UNSIGNED : SIGNED;

  /* Extract the immediate value from the rtx.  */
  wide_int imm_val = rtx_mode_t (*imm, mode);

  if (*code == GT || *code == GTU || *code == LE || *code == LEU)
    to_add = 1;
  else if (*code == GE || *code == GEU || *code == LT || *code == LTU)
    to_add = -1;
  else
    return;

  /* Check for overflow/underflow in the case of signed values and
     wrapping around in the case of unsigned values.  If any occur
     cancel the optimization.  */
  wi::overflow_type overflow = wi::OVF_NONE;
  wide_int imm_modif = wi::add (imm_val, to_add, sgn, &overflow);
  if (overflow)
    return;

  rtx reg = gen_rtx_REG (mode, LAST_VIRTUAL_REGISTER + 1);
  rtx new_imm = immed_wide_int_const (imm_modif, mode);

  rtx_insn *old_rtx = gen_move_insn (reg, *imm);
  rtx_insn *new_rtx = gen_move_insn (reg, new_imm);

  /* Update the immediate and the code.  */
  if (insn_cost (old_rtx, true) > insn_cost (new_rtx, true))
    {
      *code = equivalent_cmp_code (*code);
      *imm = new_imm;
    }
}
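To see the shape of this decision outside of GCC, here is a rough standalone model (plain C, not the implementation above): imm_cost is a made-up stand-in for insn_cost on the move that materialises the constant, and the wide_int overflow handling of the real function is omitted.

/* Standalone sketch, not GCC code: choose between IMM and IMM +/- 1 based
   on an abstract materialisation cost, flipping the comparison strictness
   to keep the predicate equivalent.  */
#include <stdint.h>
#include <stdio.h>

enum cmp_code { GT, GE, LT, LE, GTU, GEU, LTU, LEU };

/* Mirrors equivalent_cmp_code: swap inclusive and exclusive codes.  */
static enum cmp_code
swap_strictness (enum cmp_code code)
{
  switch (code)
    {
    case GT:  return GE;
    case GE:  return GT;
    case LT:  return LE;
    case LE:  return LT;
    case GTU: return GEU;
    case GEU: return GTU;
    case LTU: return LEU;
    case LEU: return LTU;
    }
  return code;
}

/* Hypothetical cost proxy: how many 16-bit chunks of VAL differ from an
   all-zeros or all-ones 64-bit pattern (very roughly, a movz/movn plus
   movk count; real targets know about many more encodings).  */
static int
imm_cost (uint64_t val)
{
  int from_zero = 0, from_ones = 0;
  for (int i = 0; i < 4; i++)
    {
      uint16_t chunk = (uint16_t) (val >> (16 * i));
      from_zero += (chunk != 0);
      from_ones += (chunk != 0xffff);
    }
  return from_zero < from_ones ? from_zero : from_ones;
}

/* GT/GTU/LE/LEU pair with IMM + 1; GE/GEU/LT/LTU pair with IMM - 1.
   Unlike the real function, no overflow/wrap-around check is done.  */
static void
canonicalize (enum cmp_code *code, uint64_t *imm)
{
  int delta = (*code == GT || *code == GTU || *code == LE || *code == LEU)
	      ? 1 : -1;
  uint64_t candidate = *imm + (uint64_t) delta;
  if (imm_cost (candidate) < imm_cost (*imm))
    {
      *code = swap_strictness (*code);
      *imm = candidate;
    }
}

int
main (void)
{
  /* x > 0xfffffffffffffffe becomes x >= 0xffffffffffffffff, because the
     all-ones candidate is cheaper under this toy cost function.  */
  enum cmp_code code = GTU;
  uint64_t imm = 0xfffffffffffffffeULL;
  canonicalize (&code, &imm);
  printf ("code=%d imm=%#llx\n", code, (unsigned long long) imm);
  return 0;
}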
/* Perform possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE.  This is
gcc/expmed.h
@@ -702,6 +702,8 @@ extern rtx emit_store_flag (rtx, enum rtx_code, rtx, rtx, machine_mode,
extern rtx emit_store_flag_force (rtx, enum rtx_code, rtx, rtx,
				  machine_mode, int, int);

extern void canonicalize_comparison (machine_mode, enum rtx_code *, rtx *);

/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.  */
gcc/optabs.c
@@ -3812,6 +3812,9 @@ prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
  gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
	      || methods == OPTAB_LIB_WIDEN);

  if (CONST_SCALAR_INT_P (y))
    canonicalize_comparison (mode, &comparison, &y);

  /* If we are optimizing, force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
gcc/rtl.h
@@ -3310,6 +3310,15 @@ extern enum rtx_code unsigned_condition (enum rtx_code);
extern enum rtx_code signed_condition (enum rtx_code);
extern void mark_jump_label (rtx, rtx_insn *, int);

/* Return true if integer comparison operator CODE interprets its operands
   as unsigned.  */

inline bool
unsigned_condition_p (enum rtx_code code)
{
  return unsigned_condition (code) == code;
}
/* In jump.c */
extern rtx_insn *delete_related_insns (rtx);
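As a side note on unsigned_condition_p above, a tiny standalone model of what it returns (not GCC code; it assumes unsigned_condition maps GT/GE/LT/LE to GTU/GEU/LTU/LEU and returns EQ, NE and the already-unsigned codes unchanged, as jump.c does):

/* Standalone model, not GCC code; the reduced rtx_code enum and the
   unsigned_condition mapping are assumptions mirroring jump.c.  */
#include <assert.h>
#include <stdbool.h>

enum rtx_code { EQ, NE, GT, GE, LT, LE, GTU, GEU, LTU, LEU };

static enum rtx_code
unsigned_condition (enum rtx_code code)
{
  switch (code)
    {
    case GT: return GTU;
    case GE: return GEU;
    case LT: return LTU;
    case LE: return LEU;
    default: return code;	/* EQ, NE and the *U codes are unchanged.  */
    }
}

static bool
unsigned_condition_p (enum rtx_code code)
{
  return unsigned_condition (code) == code;
}

int
main (void)
{
  assert (unsigned_condition_p (GTU));	/* Already an unsigned code.  */
  assert (!unsigned_condition_p (GT));	/* Signed code: maps to GTU.  */
  assert (unsigned_condition_p (EQ));	/* Signedness is irrelevant for EQ/NE.  */
  return 0;
}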
gcc/testsuite/ChangeLog
2018-08-16  Vlad Lazar  <vlad.lazar@arm.com>

	* gcc.target/aarch64/imm_choice_comparison.c: New test.

2018-08-16  Iain Sandoe  <iain@sandoe.co.uk>

	* gcc.dg/memcmp-1.c (lib_memcmp): Apply __USER_LABEL_PREFIX__.
gcc/testsuite/gcc.target/aarch64/imm_choice_comparison.c
/* { dg-do compile } */
/* { dg-options "-O2" } */
/* Go from four moves to two.  */

int
foo (long long x)
{
  return x <= 0x1999999999999998;
}

int
GT (unsigned int x)
{
  return x > 0xfefffffe;
}

int
LE (unsigned int x)
{
  return x <= 0xfefffffe;
}

int
GE (long long x)
{
  return x >= 0xff000000;
}

int
LT (int x)
{
  return x < 0xff000000;
}

/* Optimize the immediate in conditionals.  */

int
check (int x, int y)
{
  if (x > y && GT (x))
    return 100;

  return x;
}

int
tern (int x)
{
  return x >= 0xff000000 ? 5 : -3;
}
/* Exactly one movk instruction should be needed across the whole file.  */
/* { dg-final { scan-assembler-times "movk" 1 } } */