Commit 1466e387, authored and committed by Richard Henderson

re PR rtl-optimization/15289 (reload error with non-lowpart subregs)

        PR rtl-opt/15289
        * emit-rtl.c (gen_complex_constant_part): Remove.
        (gen_realpart, gen_imagpart): Remove.
        * rtl.h (gen_realpart, gen_imagpart): Remove.
        * expmed.c (extract_bit_field): Remove CONCAT hack catering to
        gen_realpart/gen_imagpart.
        * expr.c (write_complex_part, read_complex_part): New.
        (emit_move_via_alt_mode, emit_move_via_integer, emit_move_resolve_push,
        emit_move_complex_push, emit_move_complex, emit_move_ccmode,
        emit_move_multi_word): Split out from ...
        (emit_move_insn_1): ... here.
        (expand_expr_real_1) <COMPLEX_EXPR>: Use write_complex_part.
        <REALPART_EXPR, IMAGPART_EXPR>: Use read_complex_part.
        * function.c (assign_parm_setup_reg): Hard-code transformations
        instead of using gen_realpart/gen_imagpart.

From-SVN: r91571
parent 8c1cfd5a
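The shape of the change, as a before/after sketch (illustrative only, not text from the patch; both new helpers are static to expr.c): gen_realpart/gen_imagpart had to return lvalue-style RTL, so sub-word components of hard registers made them fail, which reload could provoke through non-lowpart subregs. The replacement helpers go through the bit-field routines instead, which can read and write any representation.

    /* Old idiom: components as lvalue word subregs; internal_error on
       hard registers whose parts are not word-sized (PR 15289).  */
    emit_move_insn (gen_realpart (submode, x), gen_realpart (submode, y));
    emit_move_insn (gen_imagpart (submode, x), gen_imagpart (submode, y));

    /* New idiom: components via store_bit_field/extract_bit_field, with
       fast paths for CONCAT values and spilled complex constants.  */
    write_complex_part (x, read_complex_part (y, false), false);
    write_complex_part (x, read_complex_part (y, true), true);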
gcc/emit-rtl.c
@@ -184,7 +184,6 @@ static int reg_attrs_htab_eq (const void *, const void *);
 static reg_attrs *get_reg_attrs (tree, int);
 static tree component_ref_for_mem_expr (tree);
 static rtx gen_const_vector (enum machine_mode, int);
-static rtx gen_complex_constant_part (enum machine_mode, rtx, int);
 static void copy_rtx_if_shared_1 (rtx *orig);
 
 /* Probability of the conditional branch currently proceeded by try_split.
gcc/emit-rtl.c
@@ -1169,81 +1168,6 @@ gen_lowpart_common (enum machine_mode mode, rtx x)
   return 0;
 }
 
-/* Return the constant real or imaginary part (which has mode MODE)
-   of a complex value X.  The IMAGPART_P argument determines whether
-   the real or complex component should be returned.  This function
-   returns NULL_RTX if the component isn't a constant.  */
-
-static rtx
-gen_complex_constant_part (enum machine_mode mode, rtx x, int imagpart_p)
-{
-  tree decl, part;
-
-  if (MEM_P (x)
-      && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
-    {
-      decl = SYMBOL_REF_DECL (XEXP (x, 0));
-      if (decl != NULL_TREE && TREE_CODE (decl) == COMPLEX_CST)
-        {
-          part = imagpart_p ? TREE_IMAGPART (decl) : TREE_REALPART (decl);
-          if (TREE_CODE (part) == REAL_CST
-              || TREE_CODE (part) == INTEGER_CST)
-            return expand_expr (part, NULL_RTX, mode, 0);
-        }
-    }
-  return NULL_RTX;
-}
-
-/* Return the real part (which has mode MODE) of a complex value X.
-   This always comes at the low address in memory.  */
-
-rtx
-gen_realpart (enum machine_mode mode, rtx x)
-{
-  rtx part;
-
-  /* Handle complex constants.  */
-  part = gen_complex_constant_part (mode, x, 0);
-  if (part != NULL_RTX)
-    return part;
-
-  if (WORDS_BIG_ENDIAN
-      && GET_MODE_BITSIZE (mode) < BITS_PER_WORD
-      && REG_P (x)
-      && REGNO (x) < FIRST_PSEUDO_REGISTER)
-    internal_error
-      ("can't access real part of complex value in hard register");
-  else if (WORDS_BIG_ENDIAN)
-    return gen_highpart (mode, x);
-  else
-    return gen_lowpart (mode, x);
-}
-
-/* Return the imaginary part (which has mode MODE) of a complex value X.
-   This always comes at the high address in memory.  */
-
-rtx
-gen_imagpart (enum machine_mode mode, rtx x)
-{
-  rtx part;
-
-  /* Handle complex constants.  */
-  part = gen_complex_constant_part (mode, x, 1);
-  if (part != NULL_RTX)
-    return part;
-
-  if (WORDS_BIG_ENDIAN)
-    return gen_lowpart (mode, x);
-  else if (! WORDS_BIG_ENDIAN
-           && GET_MODE_BITSIZE (mode) < BITS_PER_WORD
-           && REG_P (x)
-           && REGNO (x) < FIRST_PSEUDO_REGISTER)
-    internal_error
-      ("can't access imaginary part of complex value in hard register");
-  else
-    return gen_highpart (mode, x);
-}
-
 rtx
 gen_highpart (enum machine_mode mode, rtx x)
 {
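Why these had to go, in one concrete case (my illustration; the register number is arbitrary): on a WORDS_BIG_ENDIAN target with 64-bit words, the SFmode real part of an SCmode value living in a hard register is a sub-word piece, so gen_realpart had no lvalue to hand back:

    rtx z = gen_rtx_REG (SCmode, 3);    /* complex float in a hard reg */

    /* Old: internal_error ("can't access real part of complex value
       in hard register"), per the WORDS_BIG_ENDIAN branch above.  */
    rtx r = gen_realpart (SFmode, z);

    /* New: read_complex_part (z, false) extracts bits 0..31 with
       extract_bit_field; no word-aligned SUBREG is required.  */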
gcc/expmed.c
@@ -1611,28 +1611,6 @@ extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
     return spec_target;
   if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
     {
-      /* If the target mode is complex, then extract the two scalar elements
-         from the value now.  Creating (subreg:SC (reg:DI) 0), as we would do
-         with the clause below, will cause gen_realpart or gen_imagpart to
-         fail, since those functions must return lvalues.  */
-      if (COMPLEX_MODE_P (tmode))
-        {
-          rtx realpart, imagpart;
-          enum machine_mode itmode = GET_MODE_INNER (tmode);
-
-          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
-                                                   MODE_INT, 0),
-                                    target, unsignedp);
-          realpart = extract_bit_field (target, GET_MODE_BITSIZE (itmode), 0,
-                                        unsignedp, NULL, itmode, itmode);
-          imagpart = extract_bit_field (target, GET_MODE_BITSIZE (itmode),
-                                        GET_MODE_BITSIZE (itmode), unsignedp,
-                                        NULL, itmode, itmode);
-          return gen_rtx_CONCAT (tmode, realpart, imagpart);
-        }
-
       /* If the target mode is not a scalar integral, first convert to the
          integer mode of that size and then access it as a floating-point
          value via a SUBREG.  */
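The special case is dead because read_complex_part now performs the component extraction itself and accepts whatever extract_bit_field returns, including (subreg:SC (reg:DI) 0). Condensed, the replacement path for a non-CONCAT value is a single bit-field read at the component's offset (a sketch of the new expr.c code below, not a separate API):

    part = extract_bit_field (cplx, ibitsize,           /* component width */
                              imag_p ? ibitsize : 0,    /* bit offset */
                              true, NULL_RTX, imode, imode);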
gcc/expr.c
@@ -2574,404 +2574,467 @@ clear_storage_libcall_fn (int for_call)
   return block_clear_fn;
 }
 
-/* Generate code to copy Y into X.
-   Both Y and X must have the same mode, except that
-   Y can be a constant with VOIDmode.
-   This mode cannot be BLKmode; use emit_block_move for that.
-
-   Return the last instruction emitted.  */
-
-rtx
-emit_move_insn (rtx x, rtx y)
-{
-  enum machine_mode mode = GET_MODE (x);
-  rtx y_cst = NULL_RTX;
-  rtx last_insn, set;
-
-  gcc_assert (mode != BLKmode
-              && (GET_MODE (y) == mode || GET_MODE (y) == VOIDmode));
-
-  if (CONSTANT_P (y))
-    {
-      if (optimize
-          && SCALAR_FLOAT_MODE_P (GET_MODE (x))
-          && (last_insn = compress_float_constant (x, y)))
-        return last_insn;
-
-      y_cst = y;
-
-      if (!LEGITIMATE_CONSTANT_P (y))
-        {
-          y = force_const_mem (mode, y);
-
-          /* If the target's cannot_force_const_mem prevented the spill,
-             assume that the target's move expanders will also take care
-             of the non-legitimate constant.  */
-          if (!y)
-            y = y_cst;
-        }
-    }
-
-  /* If X or Y are memory references, verify that their addresses are valid
-     for the machine.  */
-  if (MEM_P (x)
-      && ((! memory_address_p (GET_MODE (x), XEXP (x, 0))
-           && ! push_operand (x, GET_MODE (x)))
-          || (flag_force_addr
-              && CONSTANT_ADDRESS_P (XEXP (x, 0)))))
-    x = validize_mem (x);
-
-  if (MEM_P (y)
-      && (! memory_address_p (GET_MODE (y), XEXP (y, 0))
-          || (flag_force_addr
-              && CONSTANT_ADDRESS_P (XEXP (y, 0)))))
-    y = validize_mem (y);
-
-  gcc_assert (mode != BLKmode);
-
-  last_insn = emit_move_insn_1 (x, y);
-
-  if (y_cst && REG_P (x)
-      && (set = single_set (last_insn)) != NULL_RTX
-      && SET_DEST (set) == x
-      && ! rtx_equal_p (y_cst, SET_SRC (set)))
-    set_unique_reg_note (last_insn, REG_EQUAL, y_cst);
-
-  return last_insn;
-}
-
-/* Low level part of emit_move_insn.
-   Called just like emit_move_insn, but assumes X and Y
-   are basically valid.  */
-
-rtx
-emit_move_insn_1 (rtx x, rtx y)
-{
-  enum machine_mode mode = GET_MODE (x);
-  enum machine_mode submode;
-
-  gcc_assert ((unsigned int) mode < (unsigned int) MAX_MACHINE_MODE);
-
-  if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
-    return
-      emit_insn (GEN_FCN (mov_optab->handlers[(int) mode].insn_code) (x, y));
-
-  /* Expand complex moves by moving real part and imag part, if possible.  */
-  else if (COMPLEX_MODE_P (mode)
-           && BLKmode != (submode = GET_MODE_INNER (mode))
-           && (mov_optab->handlers[(int) submode].insn_code
-               != CODE_FOR_nothing))
-    {
-      unsigned int modesize = GET_MODE_SIZE (mode);
-      unsigned int submodesize = GET_MODE_SIZE (submode);
-
-      /* Don't split destination if it is a stack push.  */
-      int stack = push_operand (x, mode);
-
-#ifdef PUSH_ROUNDING
-      /* In case we output to the stack, but the size is smaller than the
-         machine can push exactly, we need to use move instructions.  */
-      if (stack && PUSH_ROUNDING (submodesize) != submodesize)
-        {
-          rtx temp;
-          HOST_WIDE_INT offset1, offset2;
-
-          /* Do not use anti_adjust_stack, since we don't want to update
-             stack_pointer_delta.  */
-          temp = expand_binop (Pmode,
-#ifdef STACK_GROWS_DOWNWARD
-                               sub_optab,
-#else
-                               add_optab,
-#endif
-                               stack_pointer_rtx,
-                               GEN_INT (PUSH_ROUNDING (modesize)),
-                               stack_pointer_rtx, 0, OPTAB_LIB_WIDEN);
-
-          if (temp != stack_pointer_rtx)
-            emit_move_insn (stack_pointer_rtx, temp);
-
-#ifdef STACK_GROWS_DOWNWARD
-          offset1 = 0;
-          offset2 = submodesize;
-#else
-          offset1 = -PUSH_ROUNDING (modesize);
-          offset2 = -PUSH_ROUNDING (modesize) + submodesize;
-#endif
-
-          emit_move_insn (change_address (x, submode,
-                                          gen_rtx_PLUS (Pmode,
-                                                        stack_pointer_rtx,
-                                                        GEN_INT (offset1))),
-                          gen_realpart (submode, y));
-          emit_move_insn (change_address (x, submode,
-                                          gen_rtx_PLUS (Pmode,
-                                                        stack_pointer_rtx,
-                                                        GEN_INT (offset2))),
-                          gen_imagpart (submode, y));
-        }
-      else
-#endif
-      /* If this is a stack, push the highpart first, so it
-         will be in the argument order.
-
-         In that case, change_address is used only to convert
-         the mode, not to change the address.  */
-      if (stack)
-        {
-          /* Note that the real part always precedes the imag part in memory
-             regardless of machine's endianness.  */
-#ifdef STACK_GROWS_DOWNWARD
-          emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
-                          gen_imagpart (submode, y));
-          emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
-                          gen_realpart (submode, y));
-#else
-          emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
-                          gen_realpart (submode, y));
-          emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
-                          gen_imagpart (submode, y));
-#endif
-        }
-      else
-        {
-          rtx realpart_x, realpart_y;
-          rtx imagpart_x, imagpart_y;
-
-          /* If this is a complex value with each part being smaller than a
-             word, the usual calling sequence will likely pack the pieces into
-             a single register.  Unfortunately, SUBREG of hard registers only
-             deals in terms of words, so we have a problem converting input
-             arguments to the CONCAT of two registers that is used elsewhere
-             for complex values.  If this is before reload, we can copy it into
-             memory and reload.  FIXME, we should see about using extract and
-             insert on integer registers, but complex short and complex char
-             variables should be rarely used.  */
-          if ((reload_in_progress | reload_completed) == 0
-              && (!validate_subreg (submode, mode, NULL, submodesize)
-                  || !validate_subreg (submode, mode, NULL, 0)))
-            {
-              if (REG_P (x) || REG_P (y))
-                {
-                  rtx mem, cmem;
-                  enum machine_mode reg_mode
-                    = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 1);
-
-                  gcc_assert (reg_mode != BLKmode);
-
-                  mem = assign_stack_temp (reg_mode, modesize, 0);
-                  cmem = adjust_address (mem, mode, 0);
-
-                  if (REG_P (x))
-                    {
-                      rtx sreg = gen_rtx_SUBREG (reg_mode, x, 0);
-                      emit_move_insn_1 (cmem, y);
-                      return emit_move_insn_1 (sreg, mem);
-                    }
-                  else
-                    {
-                      rtx sreg = gen_rtx_SUBREG (reg_mode, y, 0);
-                      emit_move_insn_1 (mem, sreg);
-                      return emit_move_insn_1 (x, cmem);
-                    }
-                }
-            }
-
-          realpart_x = gen_realpart (submode, x);
-          realpart_y = gen_realpart (submode, y);
-          imagpart_x = gen_imagpart (submode, x);
-          imagpart_y = gen_imagpart (submode, y);
-
-          /* Show the output dies here.  This is necessary for SUBREGs
-             of pseudos since we cannot track their lifetimes correctly;
-             hard regs shouldn't appear here except as return values.
-             We never want to emit such a clobber after reload.  */
-          if (x != y
-              && ! (reload_in_progress || reload_completed)
-              && (GET_CODE (realpart_x) == SUBREG
-                  || GET_CODE (imagpart_x) == SUBREG))
-            emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
-
-          emit_move_insn (realpart_x, realpart_y);
-          emit_move_insn (imagpart_x, imagpart_y);
-        }
-
-      return get_last_insn ();
-    }
-
-  /* Handle MODE_CC modes:  If we don't have a special move insn for this mode,
-     find a mode to do it in.  If we have a movcc, use it.  Otherwise,
-     find the MODE_INT mode of the same width.  */
-  else if (GET_MODE_CLASS (mode) == MODE_CC
-           && mov_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing)
-    {
-      enum insn_code insn_code;
-      enum machine_mode tmode = VOIDmode;
-      rtx x1 = x, y1 = y;
-
-      if (mode != CCmode
-          && mov_optab->handlers[(int) CCmode].insn_code != CODE_FOR_nothing)
-        tmode = CCmode;
-      else
-        for (tmode = QImode; tmode != VOIDmode;
-             tmode = GET_MODE_WIDER_MODE (tmode))
-          if (GET_MODE_SIZE (tmode) == GET_MODE_SIZE (mode))
-            break;
-
-      gcc_assert (tmode != VOIDmode);
-
-      /* Get X and Y in TMODE.  We can't use gen_lowpart here because it
-         may call change_address which is not appropriate if we were
-         called when a reload was in progress.  We don't have to worry
-         about changing the address since the size in bytes is supposed to
-         be the same.  Copy the MEM to change the mode and move any
-         substitutions from the old MEM to the new one.  */
-
-      if (reload_in_progress)
-        {
-          x = gen_lowpart_common (tmode, x1);
-          if (x == 0 && MEM_P (x1))
-            {
-              x = adjust_address_nv (x1, tmode, 0);
-              copy_replacements (x1, x);
-            }
-
-          y = gen_lowpart_common (tmode, y1);
-          if (y == 0 && MEM_P (y1))
-            {
-              y = adjust_address_nv (y1, tmode, 0);
-              copy_replacements (y1, y);
-            }
-        }
-      else
-        {
-          x = gen_lowpart (tmode, x);
-          y = gen_lowpart (tmode, y);
-        }
-
-      insn_code = mov_optab->handlers[(int) tmode].insn_code;
-      return emit_insn (GEN_FCN (insn_code) (x, y));
-    }
-
-  /* Try using a move pattern for the corresponding integer mode.  This is
-     only safe when simplify_subreg can convert MODE constants into integer
-     constants.  At present, it can only do this reliably if the value
-     fits within a HOST_WIDE_INT.  */
-  else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
-           && (submode = int_mode_for_mode (mode)) != BLKmode
-           && mov_optab->handlers[submode].insn_code != CODE_FOR_nothing)
-    return emit_insn (GEN_FCN (mov_optab->handlers[submode].insn_code)
-                      (simplify_gen_subreg (submode, x, mode, 0),
-                       simplify_gen_subreg (submode, y, mode, 0)));
-
-  /* This will handle any multi-word or full-word mode that lacks a move_insn
-     pattern.  However, you will get better code if you define such patterns,
-     even if they must turn into multiple assembler instructions.  */
-  else
-    {
-      rtx last_insn = 0;
-      rtx seq, inner;
-      int need_clobber;
-      int i;
-
-      gcc_assert (GET_MODE_SIZE (mode) >= UNITS_PER_WORD);
-
-#ifdef PUSH_ROUNDING
-
-      /* If X is a push on the stack, do the push now and replace
-         X with a reference to the stack pointer.  */
-      if (push_operand (x, GET_MODE (x)))
-        {
-          rtx temp;
-          enum rtx_code code;
-
-          /* Do not use anti_adjust_stack, since we don't want to update
-             stack_pointer_delta.  */
-          temp = expand_binop (Pmode,
-#ifdef STACK_GROWS_DOWNWARD
-                               sub_optab,
-#else
-                               add_optab,
-#endif
-                               stack_pointer_rtx,
-                               GEN_INT
-                               (PUSH_ROUNDING
-                                (GET_MODE_SIZE (GET_MODE (x)))),
-                               stack_pointer_rtx, 0, OPTAB_LIB_WIDEN);
-
-          if (temp != stack_pointer_rtx)
-            emit_move_insn (stack_pointer_rtx, temp);
-
-          code = GET_CODE (XEXP (x, 0));
-
-          /* Just hope that small offsets off SP are OK.  */
-          if (code == POST_INC)
-            temp = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
-                                 GEN_INT (-((HOST_WIDE_INT)
-                                            GET_MODE_SIZE (GET_MODE (x)))));
-          else if (code == POST_DEC)
-            temp = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
-                                 GEN_INT (GET_MODE_SIZE (GET_MODE (x))));
-          else
-            temp = stack_pointer_rtx;
-
-          x = change_address (x, VOIDmode, temp);
-        }
-#endif
-
-      /* If we are in reload, see if either operand is a MEM whose address
-         is scheduled for replacement.  */
-      if (reload_in_progress && MEM_P (x)
-          && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
-        x = replace_equiv_address_nv (x, inner);
-      if (reload_in_progress && MEM_P (y)
-          && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
-        y = replace_equiv_address_nv (y, inner);
-
-      start_sequence ();
-
-      need_clobber = 0;
-      for (i = 0;
-           i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
-           i++)
-        {
-          rtx xpart = operand_subword (x, i, 1, mode);
-          rtx ypart = operand_subword (y, i, 1, mode);
-
-          /* If we can't get a part of Y, put Y into memory if it is a
-             constant.  Otherwise, force it into a register.  If we still
-             can't get a part of Y, abort.  */
-          if (ypart == 0 && CONSTANT_P (y))
-            {
-              y = force_const_mem (mode, y);
-              ypart = operand_subword (y, i, 1, mode);
-            }
-          else if (ypart == 0)
-            ypart = operand_subword_force (y, i, mode);
-
-          gcc_assert (xpart && ypart);
-
-          need_clobber |= (GET_CODE (xpart) == SUBREG);
-
-          last_insn = emit_move_insn (xpart, ypart);
-        }
-
-      seq = get_insns ();
-      end_sequence ();
-
-      /* Show the output dies here.  This is necessary for SUBREGs
-         of pseudos since we cannot track their lifetimes correctly;
-         hard regs shouldn't appear here except as return values.
-         We never want to emit such a clobber after reload.  */
-      if (x != y
-          && ! (reload_in_progress || reload_completed)
-          && need_clobber != 0)
-        emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
-
-      emit_insn (seq);
-
-      return last_insn;
-    }
-}
+/* Write to one of the components of the complex value CPLX.  Write VAL to
+   the real part if IMAG_P is false, and the imaginary part if it's true.  */
+
+static void
+write_complex_part (rtx cplx, rtx val, bool imag_p)
+{
+  if (GET_CODE (cplx) == CONCAT)
+    emit_move_insn (XEXP (cplx, imag_p), val);
+  else
+    {
+      enum machine_mode cmode = GET_MODE (cplx);
+      enum machine_mode imode = GET_MODE_INNER (cmode);
+      unsigned ibitsize = GET_MODE_BITSIZE (imode);
+
+      store_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0, imode, val);
+    }
+}
+
+/* Extract one of the components of the complex value CPLX.  Extract the
+   real part if IMAG_P is false, and the imaginary part if it's true.  */
+
+static rtx
+read_complex_part (rtx cplx, bool imag_p)
+{
+  enum machine_mode cmode, imode;
+  unsigned ibitsize;
+
+  if (GET_CODE (cplx) == CONCAT)
+    return XEXP (cplx, imag_p);
+
+  cmode = GET_MODE (cplx);
+  imode = GET_MODE_INNER (cmode);
+  ibitsize = GET_MODE_BITSIZE (imode);
+
+  /* Special case reads from complex constants that got spilled to memory.  */
+  if (MEM_P (cplx) && GET_CODE (XEXP (cplx, 0)) == SYMBOL_REF)
+    {
+      tree decl = SYMBOL_REF_DECL (XEXP (cplx, 0));
+      if (decl && TREE_CODE (decl) == COMPLEX_CST)
+        {
+          tree part = imag_p ? TREE_IMAGPART (decl) : TREE_REALPART (decl);
+          if (CONSTANT_CLASS_P (part))
+            return expand_expr (part, NULL_RTX, imode, EXPAND_NORMAL);
+        }
+    }
+
+  return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
+                            true, NULL_RTX, imode, imode);
+}
+
+/* A subroutine of emit_move_insn_1.  Generate a move from Y into X using
+   ALT_MODE instead of the operand's natural mode, MODE.  CODE is the insn
+   code for the move in ALT_MODE, and is known to be valid.  Returns the
+   instruction emitted.  */
+
+static rtx
+emit_move_via_alt_mode (enum machine_mode alt_mode, enum machine_mode mode,
+                        enum insn_code code, rtx x, rtx y)
+{
+  /* Get X and Y in ALT_MODE.  We can't use gen_lowpart here because it
+     may call change_address which is not appropriate if we were
+     called when a reload was in progress.  We don't have to worry
+     about changing the address since the size in bytes is supposed to
+     be the same.  Copy the MEM to change the mode and move any
+     substitutions from the old MEM to the new one.  */
+
+  if (reload_in_progress)
+    {
+      rtx x1 = x, y1 = y;
+
+      x = gen_lowpart_common (alt_mode, x1);
+      if (x == 0 && MEM_P (x1))
+        {
+          x = adjust_address_nv (x1, alt_mode, 0);
+          copy_replacements (x1, x);
+        }
+
+      y = gen_lowpart_common (alt_mode, y1);
+      if (y == 0 && MEM_P (y1))
+        {
+          y = adjust_address_nv (y1, alt_mode, 0);
+          copy_replacements (y1, y);
+        }
+    }
+  else
+    {
+      x = simplify_gen_subreg (alt_mode, x, mode, 0);
+      y = simplify_gen_subreg (alt_mode, y, mode, 0);
+    }
+
+  return emit_insn (GEN_FCN (code) (x, y));
+}
+
+/* A subroutine of emit_move_insn_1.  Generate a move from Y into X using
+   an integer mode of the same size as MODE.  Returns the instruction
+   emitted, or NULL if such a move could not be generated.  */
+
+static rtx
+emit_move_via_integer (enum machine_mode mode, rtx x, rtx y)
+{
+  enum machine_mode imode;
+  enum insn_code code;
+
+  /* There must exist a mode of the exact size we require.  */
+  imode = int_mode_for_mode (mode);
+  if (imode == BLKmode)
+    return NULL_RTX;
+
+  /* The target must support moves in this mode.  */
+  code = mov_optab->handlers[imode].insn_code;
+  if (code == CODE_FOR_nothing)
+    return NULL_RTX;
+
+  return emit_move_via_alt_mode (imode, mode, code, x, y);
+}
+
+/* A subroutine of emit_move_insn_1.  X is a push_operand in MODE.
+   Return an equivalent MEM that does not use an auto-increment.  */
+
+static rtx
+emit_move_resolve_push (enum machine_mode mode, rtx x)
+{
+  enum rtx_code code = GET_CODE (XEXP (x, 0));
+  HOST_WIDE_INT adjust;
+  rtx temp;
+
+  adjust = GET_MODE_SIZE (mode);
+#ifdef PUSH_ROUNDING
+  adjust = PUSH_ROUNDING (adjust);
+#endif
+  if (code == PRE_DEC || code == POST_DEC)
+    adjust = -adjust;
+
+  /* Do not use anti_adjust_stack, since we don't want to update
+     stack_pointer_delta.  */
+  temp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
+                              GEN_INT (adjust), stack_pointer_rtx,
+                              0, OPTAB_LIB_WIDEN);
+  if (temp != stack_pointer_rtx)
+    emit_move_insn (stack_pointer_rtx, temp);
+
+  switch (code)
+    {
+    case PRE_INC:
+    case PRE_DEC:
+      temp = stack_pointer_rtx;
+      break;
+    case POST_INC:
+      temp = plus_constant (stack_pointer_rtx, -GET_MODE_SIZE (mode));
+      break;
+    case POST_DEC:
+      temp = plus_constant (stack_pointer_rtx, GET_MODE_SIZE (mode));
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  return replace_equiv_address (x, temp);
+}
+
+/* A subroutine of emit_move_complex.  Generate a move from Y into X.
+   X is known to satisfy push_operand, and MODE is known to be complex.
+   Returns the last instruction emitted.  */
+
+static rtx
+emit_move_complex_push (enum machine_mode mode, rtx x, rtx y)
+{
+  enum machine_mode submode = GET_MODE_INNER (mode);
+  bool imag_first;
+
+#ifdef PUSH_ROUNDING
+  unsigned int submodesize = GET_MODE_SIZE (submode);
+
+  /* In case we output to the stack, but the size is smaller than the
+     machine can push exactly, we need to use move instructions.  */
+  if (PUSH_ROUNDING (submodesize) != submodesize)
+    {
+      x = emit_move_resolve_push (mode, x);
+      return emit_move_insn (x, y);
+    }
+#endif
+
+  /* Note that the real part always precedes the imag part in memory
+     regardless of machine's endianness.  */
+  switch (GET_CODE (XEXP (x, 0)))
+    {
+    case PRE_DEC:
+    case POST_DEC:
+      imag_first = true;
+      break;
+    case PRE_INC:
+    case POST_INC:
+      imag_first = false;
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
+                  read_complex_part (y, imag_first));
+  return emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
+                         read_complex_part (y, !imag_first));
+}
+
+/* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
+   MODE is known to be complex.  Returns the last instruction emitted.  */
+
+static rtx
+emit_move_complex (enum machine_mode mode, rtx x, rtx y)
+{
+  bool try_int;
+
+  /* Need to take special care for pushes, to maintain proper ordering
+     of the data, and possibly extra padding.  */
+  if (push_operand (x, mode))
+    return emit_move_complex_push (mode, x, y);
+
+  /* For memory to memory moves, optimal behaviour can be had with the
+     existing block move logic.  */
+  if (MEM_P (x) && MEM_P (y))
+    {
+      emit_block_move (x, y, GEN_INT (GET_MODE_SIZE (mode)),
+                       BLOCK_OP_NO_LIBCALL);
+      return get_last_insn ();
+    }
+
+  /* See if we can coerce the target into moving both values at once.  */
+
+  /* Not possible if the values are inherently not adjacent.  */
+  if (GET_CODE (x) == CONCAT || GET_CODE (y) == CONCAT)
+    try_int = false;
+  /* Is possible if both are registers (or subregs of registers).  */
+  else if (register_operand (x, mode) && register_operand (y, mode))
+    try_int = true;
+  /* If one of the operands is a memory, and alignment constraints
+     are friendly enough, we may be able to do combined memory operations.
+     We do not attempt this if Y is a constant because that combination is
+     usually better with the by-parts thing below.  */
+  else if ((MEM_P (x) ? !CONSTANT_P (y) : MEM_P (y))
+           && (!STRICT_ALIGNMENT
+               || get_mode_alignment (mode) == BIGGEST_ALIGNMENT))
+    try_int = true;
+  else
+    try_int = false;
+
+  if (try_int)
+    {
+      rtx ret = emit_move_via_integer (mode, x, y);
+      if (ret)
+        return ret;
+    }
+
+  /* Show the output dies here.  This is necessary for SUBREGs
+     of pseudos since we cannot track their lifetimes correctly;
+     hard regs shouldn't appear here except as return values.  */
+  if (!reload_completed && !reload_in_progress
+      && REG_P (x) && !reg_overlap_mentioned_p (x, y))
+    emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
+
+  write_complex_part (x, read_complex_part (y, false), false);
+  write_complex_part (x, read_complex_part (y, true), true);
+  return get_last_insn ();
+}
+
+/* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
+   MODE is known to be MODE_CC.  Returns the last instruction emitted.  */
+
+static rtx
+emit_move_ccmode (enum machine_mode mode, rtx x, rtx y)
+{
+  rtx ret;
+
+  /* Assume all MODE_CC modes are equivalent; if we have movcc, use it.  */
+  if (mode != CCmode)
+    {
+      enum insn_code code = mov_optab->handlers[CCmode].insn_code;
+      if (code != CODE_FOR_nothing)
+        return emit_move_via_alt_mode (CCmode, mode, code, x, y);
+    }
+
+  /* Otherwise, find the MODE_INT mode of the same width.  */
+  ret = emit_move_via_integer (mode, x, y);
+  gcc_assert (ret != NULL);
+  return ret;
+}
+
+/* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
+   MODE is any multi-word or full-word mode that lacks a move_insn
+   pattern.  Note that you will get better code if you define such
+   patterns, even if they must turn into multiple assembler instructions.  */
+
+static rtx
+emit_move_multi_word (enum machine_mode mode, rtx x, rtx y)
+{
+  rtx last_insn = 0;
+  rtx seq, inner;
+  bool need_clobber;
+  int i;
+
+  gcc_assert (GET_MODE_SIZE (mode) >= UNITS_PER_WORD);
+
+  /* If X is a push on the stack, do the push now and replace
+     X with a reference to the stack pointer.  */
+  if (push_operand (x, mode))
+    x = emit_move_resolve_push (mode, x);
+
+  /* If we are in reload, see if either operand is a MEM whose address
+     is scheduled for replacement.  */
+  if (reload_in_progress && MEM_P (x)
+      && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
+    x = replace_equiv_address_nv (x, inner);
+  if (reload_in_progress && MEM_P (y)
+      && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
+    y = replace_equiv_address_nv (y, inner);
+
+  start_sequence ();
+
+  need_clobber = false;
+  for (i = 0;
+       i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
+       i++)
+    {
+      rtx xpart = operand_subword (x, i, 1, mode);
+      rtx ypart = operand_subword (y, i, 1, mode);
+
+      /* If we can't get a part of Y, put Y into memory if it is a
+         constant.  Otherwise, force it into a register.  If we still
+         can't get a part of Y, abort.  */
+      if (ypart == 0 && CONSTANT_P (y))
+        {
+          y = force_const_mem (mode, y);
+          ypart = operand_subword (y, i, 1, mode);
+        }
+      else if (ypart == 0)
+        ypart = operand_subword_force (y, i, mode);
+
+      gcc_assert (xpart && ypart);
+
+      need_clobber |= (GET_CODE (xpart) == SUBREG);
+
+      last_insn = emit_move_insn (xpart, ypart);
+    }
+
+  seq = get_insns ();
+  end_sequence ();
+
+  /* Show the output dies here.  This is necessary for SUBREGs
+     of pseudos since we cannot track their lifetimes correctly;
+     hard regs shouldn't appear here except as return values.
+     We never want to emit such a clobber after reload.  */
+  if (x != y
+      && ! (reload_in_progress || reload_completed)
+      && need_clobber != 0)
+    emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
+
+  emit_insn (seq);
+
+  return last_insn;
+}
+
+/* Low level part of emit_move_insn.
+   Called just like emit_move_insn, but assumes X and Y
+   are basically valid.  */
+
+rtx
+emit_move_insn_1 (rtx x, rtx y)
+{
+  enum machine_mode mode = GET_MODE (x);
+  enum insn_code code;
+
+  gcc_assert ((unsigned int) mode < (unsigned int) MAX_MACHINE_MODE);
+
+  code = mov_optab->handlers[mode].insn_code;
+  if (code != CODE_FOR_nothing)
+    return emit_insn (GEN_FCN (code) (x, y));
+
+  /* Expand complex moves by moving real part and imag part.  */
+  if (COMPLEX_MODE_P (mode))
+    return emit_move_complex (mode, x, y);
+
+  if (GET_MODE_CLASS (mode) == MODE_CC)
+    return emit_move_ccmode (mode, x, y);
+
+  /* Try using a move pattern for the corresponding integer mode.  This is
+     only safe when simplify_subreg can convert MODE constants into integer
+     constants.  At present, it can only do this reliably if the value
+     fits within a HOST_WIDE_INT.  */
+  if (!CONSTANT_P (y) || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+    {
+      rtx ret = emit_move_via_integer (mode, x, y);
+      if (ret)
+        return ret;
+    }
+
+  return emit_move_multi_word (mode, x, y);
+}
+
+/* Generate code to copy Y into X.
+   Both Y and X must have the same mode, except that
+   Y can be a constant with VOIDmode.
+   This mode cannot be BLKmode; use emit_block_move for that.
+
+   Return the last instruction emitted.  */
+
+rtx
+emit_move_insn (rtx x, rtx y)
+{
+  enum machine_mode mode = GET_MODE (x);
+  rtx y_cst = NULL_RTX;
+  rtx last_insn, set;
+
+  gcc_assert (mode != BLKmode
+              && (GET_MODE (y) == mode || GET_MODE (y) == VOIDmode));
+
+  if (CONSTANT_P (y))
+    {
+      if (optimize
+          && SCALAR_FLOAT_MODE_P (GET_MODE (x))
+          && (last_insn = compress_float_constant (x, y)))
+        return last_insn;
+
+      y_cst = y;
+
+      if (!LEGITIMATE_CONSTANT_P (y))
+        {
+          y = force_const_mem (mode, y);
+
+          /* If the target's cannot_force_const_mem prevented the spill,
+             assume that the target's move expanders will also take care
+             of the non-legitimate constant.  */
+          if (!y)
+            y = y_cst;
+        }
+    }
+
+  /* If X or Y are memory references, verify that their addresses are valid
+     for the machine.  */
+  if (MEM_P (x)
+      && ((! memory_address_p (GET_MODE (x), XEXP (x, 0))
+           && ! push_operand (x, GET_MODE (x)))
+          || (flag_force_addr
+              && CONSTANT_ADDRESS_P (XEXP (x, 0)))))
+    x = validize_mem (x);
+
+  if (MEM_P (y)
+      && (! memory_address_p (GET_MODE (y), XEXP (y, 0))
+          || (flag_force_addr
+              && CONSTANT_ADDRESS_P (XEXP (y, 0)))))
+    y = validize_mem (y);
+
+  gcc_assert (mode != BLKmode);
+
+  last_insn = emit_move_insn_1 (x, y);
+
+  if (y_cst && REG_P (x)
+      && (set = single_set (last_insn)) != NULL_RTX
+      && SET_DEST (set) == x
+      && ! rtx_equal_p (y_cst, SET_SRC (set)))
+    set_unique_reg_note (last_insn, REG_EQUAL, y_cst);
+
+  return last_insn;
+}
 
 /* If Y is representable exactly in a narrower mode, and the target can
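After the split, emit_move_insn_1 is a short dispatcher. A usage-level example (mine; assumes a 64-bit target with movdi but no movsc pattern): copying one SCmode pseudo to another first tries a single integer-mode move and only falls back to per-component bit-field moves:

    rtx src = gen_reg_rtx (SCmode);
    rtx dst = gen_reg_rtx (SCmode);
    emit_move_insn (dst, src);
    /* -> emit_move_insn_1: no movsc pattern
       -> emit_move_complex: both operands registers, so try_int is true
       -> emit_move_via_integer: int_mode_for_mode (SCmode) == DImode,
          so one DImode move is emitted; otherwise the code falls back to
          write_complex_part (dst, read_complex_part (src, ...), ...).  */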
gcc/expr.c
@@ -8083,47 +8146,27 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
   case ADDR_EXPR:
     return expand_expr_addr_expr (exp, target, tmode, modifier);
 
-    /* COMPLEX type for Extended Pascal & Fortran  */
   case COMPLEX_EXPR:
-    {
-      enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
-      rtx insns;
-
-      /* Get the rtx code of the operands.  */
-      op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
-      op1 = expand_expr (TREE_OPERAND (exp, 1), 0, VOIDmode, 0);
-
-      if (! target)
-        target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
-
-      start_sequence ();
-
-      /* Move the real (op0) and imaginary (op1) parts to their location.  */
-      emit_move_insn (gen_realpart (mode, target), op0);
-      emit_move_insn (gen_imagpart (mode, target), op1);
-
-      insns = get_insns ();
-      end_sequence ();
-
-      /* Complex construction should appear as a single unit.  */
-      /* If TARGET is a CONCAT, we got insns like RD = RS, ID = IS,
-         each with a separate pseudo as destination.
-         It's not correct for flow to treat them as a unit.  */
-      if (GET_CODE (target) != CONCAT)
-        emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
-      else
-        emit_insn (insns);
-
-      return target;
-    }
+    /* Get the rtx code of the operands.  */
+    op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
+    op1 = expand_expr (TREE_OPERAND (exp, 1), 0, VOIDmode, 0);
+
+    if (!target)
+      target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp)));
+
+    /* Move the real (op0) and imaginary (op1) parts to their location.  */
+    write_complex_part (target, op0, false);
+    write_complex_part (target, op1, true);
+
+    return target;
 
   case REALPART_EXPR:
     op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
-    return gen_realpart (mode, op0);
+    return read_complex_part (op0, false);
 
   case IMAGPART_EXPR:
     op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0);
-    return gen_imagpart (mode, op0);
+    return read_complex_part (op0, true);
 
   case RESX_EXPR:
     expand_resx_expr (exp);
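A source-level view (my testcase sketch, in the spirit of PR 15289, not from the patch): for code like the following, the REALPART_EXPR and IMAGPART_EXPR cases above now expand to read_complex_part, which can extract the bits even when z is packed into a single register rather than represented as a CONCAT:

    _Complex float
    f (_Complex float z)
    {
      return __real__ z + __imag__ z * 1.0fi;   /* exercises both cases */
    }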
gcc/function.c
@@ -2869,10 +2869,11 @@ assign_parm_setup_reg (struct assign_parm_data_all *all, tree parm,
     {
       enum machine_mode submode
         = GET_MODE_INNER (GET_MODE (parmreg));
-      int regnor = REGNO (gen_realpart (submode, parmreg));
-      int regnoi = REGNO (gen_imagpart (submode, parmreg));
-      rtx stackr = gen_realpart (submode, data->stack_parm);
-      rtx stacki = gen_imagpart (submode, data->stack_parm);
+      int regnor = REGNO (XEXP (parmreg, 0));
+      int regnoi = REGNO (XEXP (parmreg, 1));
+      rtx stackr = adjust_address_nv (data->stack_parm, submode, 0);
+      rtx stacki = adjust_address_nv (data->stack_parm, submode,
+                                      GET_MODE_SIZE (submode));
 
       /* Scan backwards for the set of the real and
          imaginary parts.  */
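The hard-coded forms depend only on invariants that hold at this point (a sketch of the shapes involved, with made-up register numbers and offsets for an SCmode parameter): parmreg is known to be a CONCAT whose operands are the real and imaginary registers in that order, and in memory the real part always sits at offset 0 with the imaginary part GET_MODE_SIZE (submode) bytes above it, regardless of endianness; this is exactly what gen_realpart/gen_imagpart computed for these two rtx shapes.

    parmreg    == (concat:SC (reg:SF 58) (reg:SF 59))
    stack_parm == (mem:SC (plus:SI (reg:SI fp) (const_int -8)))

    /* Hence XEXP (parmreg, 0) / XEXP (parmreg, 1) name the component
       registers, and adjust_address_nv at offsets 0 and 4 (for SFmode)
       names the two stack halves.  */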
gcc/rtl.h
@@ -1383,8 +1383,6 @@ extern rtx gen_lowpart_if_possible (enum machine_mode, rtx);
 /* In emit-rtl.c */
 extern rtx gen_highpart (enum machine_mode, rtx);
 extern rtx gen_highpart_mode (enum machine_mode, enum machine_mode, rtx);
-extern rtx gen_realpart (enum machine_mode, rtx);
-extern rtx gen_imagpart (enum machine_mode, rtx);
 extern rtx operand_subword (rtx, unsigned int, int, enum machine_mode);
 
 /* In emit-rtl.c */