Commit 96043e7e by Richard Henderson Committed by Richard Henderson

alpha.c (aligned_memory_operand): Handle out of range stack slots.

        * alpha.c (aligned_memory_operand): Handle out of range stack slots.
        Take a new SCRATCH argument for the occasion.  Update all callers.
        (get_unaligned_address): Abort on out of range stack slots.
        * alpha.md (adddi3 splitter): Check s_p_rtx not REGNO.
        (reload_inqi): Check for aligned mems before unaligned.
        (reload_inhi): Likewise.

From-SVN: r26362
parent 55a98783
Mon Apr 12 03:07:44 1999 Richard Henderson <rth@cygnus.com>
* alpha.c (aligned_memory_operand): Handle out of range stack slots.
Take a new SCRATCH argument for the occasion. Update all callers.
(get_unaligned_address): Abort on out of range stack slots.
* alpha.md (adddi3 splitter): Check s_p_rtx not REGNO.
(reload_inqi): Check for aligned mems before unaligned.
(reload_inhi): Likewise.
Mon Apr 12 03:11:30 1999  Jeffrey A Law  (law@cygnus.com)

	* flow.c (flow_delete_insn): If we delete a CODE_LABEL, also remove
......
@@ -728,11 +728,7 @@ divmod_operator (op, mode)
    a constant.  It must be a valid address.  This means that we can do
    this as an aligned reference plus some offset.
 
-   Take into account what reload will do.
-
-   We could say that out-of-range stack slots are alignable, but that would
-   complicate get_aligned_mem and it isn't worth the trouble since few
-   functions have large stack space.  */
+   Take into account what reload will do.  */
 int
 aligned_memory_operand (op, mode)
@@ -747,11 +743,18 @@ aligned_memory_operand (op, mode)
       mode = GET_MODE (op);
     }
-  if (reload_in_progress && GET_CODE (op) == REG
-      && REGNO (op) >= FIRST_PSEUDO_REGISTER)
-    op = reg_equiv_mem[REGNO (op)];
+  if (reload_in_progress)
+    {
+      /* This is a stack slot.  The stack pointer is always aligned.
+	 We may have to jump through hoops to get a valid address,
+	 but we can do it.  */
+      if (GET_CODE (op) == REG
+	  && REGNO (op) >= FIRST_PSEUDO_REGISTER)
+	return 1;
+    }
 
-  if (GET_CODE (op) != MEM || GET_MODE (op) != mode
+  if (GET_CODE (op) != MEM
+      || GET_MODE (op) != mode
       || ! memory_address_p (mode, XEXP (op, 0)))
     return 0;
@@ -899,11 +902,12 @@ direct_return ()
 /* REF is an alignable memory location.  Place an aligned SImode
    reference into *PALIGNED_MEM and the number of bits to shift into
-   *PBITNUM.  */
+   *PBITNUM.  SCRATCH is a free register for use in reloading out
+   of range stack slots.  */
 
 void
-get_aligned_mem (ref, paligned_mem, pbitnum)
-     rtx ref;
+get_aligned_mem (ref, scratch, paligned_mem, pbitnum)
+     rtx ref, scratch;
      rtx *paligned_mem, *pbitnum;
 {
   rtx base;
@@ -919,13 +923,48 @@ get_aligned_mem (ref, paligned_mem, pbitnum)
       ref = SUBREG_REG (ref);
     }
-  if (GET_CODE (ref) == REG)
-    ref = reg_equiv_mem[REGNO (ref)];
-
   if (reload_in_progress)
-    base = find_replacement (&XEXP (ref, 0));
+    {
+      if (GET_CODE (ref) == REG)
+	{
+	  /* The "simple" case is where the stack slot is in range.  */
+	  if (reg_equiv_mem[REGNO (ref)])
+	    {
+	      ref = reg_equiv_mem[REGNO (ref)];
+	      base = find_replacement (&XEXP (ref, 0));
+	    }
+	  else
+	    {
+	      /* The stack slot isn't in range.  Fix it up as needed.  */
+	      HOST_WIDE_INT hi, lo;
+
+	      base = reg_equiv_address[REGNO (ref)];
+	      if (GET_CODE (base) != PLUS)
+		abort ();
+
+	      offset += INTVAL (XEXP (base, 1));
+	      base = XEXP (base, 0);
+
+	      lo = ((offset & 0xFFFF) ^ 0x8000) - 0x8000;
+	      hi = (((offset - lo) & 0xFFFFFFFF) ^ 0x80000000) - 0x80000000;
+	      if (hi + lo != offset)
+		abort ();
+
+	      if (scratch == NULL)
+		abort ();
+
+	      emit_insn (gen_adddi3 (scratch, base, GEN_INT (hi)));
+	      base = scratch;
+	      offset = lo;
+	    }
+	}
+      else
+	base = find_replacement (&XEXP (ref, 0));
+    }
   else
-    base = XEXP (ref, 0);
+    {
+      if (GET_CODE (ref) != MEM)
+	abort ();
+      base = XEXP (ref, 0);
+    }
 
   if (GET_CODE (base) == PLUS)
     offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
@@ -962,13 +1001,27 @@ get_unaligned_address (ref, extra_offset)
       ref = SUBREG_REG (ref);
     }
-  if (GET_CODE (ref) == REG)
-    ref = reg_equiv_mem[REGNO (ref)];
-
   if (reload_in_progress)
-    base = find_replacement (&XEXP (ref, 0));
+    {
+      if (GET_CODE (ref) == REG)
+	{
+	  if (reg_equiv_mem[REGNO (ref)])
+	    ref = reg_equiv_mem[REGNO (ref)];
+	  else
+	    {
+	      /* The stack slot is out of range.  We should have handled
+		 this as an aligned access -- I wonder why we didn't?  */
+	      abort ();
+	    }
+	}
+
+      base = find_replacement (&XEXP (ref, 0));
+    }
   else
-    base = XEXP (ref, 0);
+    {
+      if (GET_CODE (ref) != MEM)
+	abort ();
+      base = XEXP (ref, 0);
+    }
 
   if (GET_CODE (base) == PLUS)
     offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
......
@@ -547,7 +547,7 @@
 	(plus:DI (match_operand:DI 1 "register_operand" "")
 		 (match_operand:DI 2 "const_int_operand" "")))]
   "! add_operand (operands[2], DImode)
-   && REGNO (operands[0]) != STACK_POINTER_REGNUM"
+   && operands[0] != stack_pointer_rtx"
   [(set (match_dup 0) (plus:DI (match_dup 1) (match_dup 3)))
    (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 4)))]
   "
@@ -4521,7 +4521,7 @@
 		   ? gen_rtx_REG (SImode, REGNO (operands[0]))
 		   : gen_reg_rtx (SImode));
-  get_aligned_mem (operands[1], &aligned_mem, &bitnum);
+  get_aligned_mem (operands[1], scratch, &aligned_mem, &bitnum);
   emit_insn (gen_aligned_loadqi (operands[0], aligned_mem, bitnum,
 				 scratch));
@@ -4561,7 +4561,7 @@
       rtx temp1 = gen_reg_rtx (SImode);
       rtx temp2 = gen_reg_rtx (SImode);
-      get_aligned_mem (operands[0], &aligned_mem, &bitnum);
+      get_aligned_mem (operands[0], NULL_RTX, &aligned_mem, &bitnum);
       emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
 				    temp1, temp2));
@@ -4632,7 +4632,7 @@
 		   ? gen_rtx_REG (SImode, REGNO (operands[0]))
 		   : gen_reg_rtx (SImode));
-  get_aligned_mem (operands[1], &aligned_mem, &bitnum);
+  get_aligned_mem (operands[1], scratch, &aligned_mem, &bitnum);
   emit_insn (gen_aligned_loadhi (operands[0], aligned_mem, bitnum,
 				 scratch));
@@ -4672,7 +4672,7 @@
       rtx temp1 = gen_reg_rtx (SImode);
       rtx temp2 = gen_reg_rtx (SImode);
-      get_aligned_mem (operands[0], &aligned_mem, &bitnum);
+      get_aligned_mem (operands[0], NULL_RTX, &aligned_mem, &bitnum);
       emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
 				    temp1, temp2));
@@ -4706,18 +4706,34 @@
   "! TARGET_BWX"
   "
 {
-  rtx addr = get_unaligned_address (operands[1], 0);
+  rtx scratch, seq;
 
-  /* It is possible that one of the registers we got for operands[2]
-     might coincide with that of operands[0] (which is why we made
-     it TImode).  Pick the other one to use as our scratch.  */
-  rtx scratch = gen_rtx_REG (DImode,
-			     REGNO (operands[0]) == REGNO (operands[2])
-			     ? REGNO (operands[2]) + 1 : REGNO (operands[2]));
+  if (aligned_memory_operand (operands[1], QImode))
+    {
+      rtx aligned_mem, bitnum;
 
-  rtx seq = gen_unaligned_loadqi (operands[0], addr, scratch,
-				  gen_rtx_REG (DImode, REGNO (operands[0])));
+      get_aligned_mem (operands[1],
+		       gen_rtx_REG (DImode, REGNO (operands[2]) + 1),
+		       &aligned_mem, &bitnum);
+
+      seq = gen_aligned_loadqi (operands[0], aligned_mem, bitnum,
+				gen_rtx_REG (SImode, REGNO (operands[2])));
+    }
+  else
+    {
+      rtx addr;
+
+      /* It is possible that one of the registers we got for operands[2]
+	 might coincide with that of operands[0] (which is why we made
+	 it TImode).  Pick the other one to use as our scratch.  */
+      if (REGNO (operands[0]) == REGNO (operands[2]))
+	scratch = gen_rtx_REG (DImode, REGNO (operands[2]) + 1);
+      else
+	scratch = gen_rtx_REG (DImode, REGNO (operands[2]));
+
+      addr = get_unaligned_address (operands[1], 0);
+      seq = gen_unaligned_loadqi (operands[0], addr, scratch,
+				  gen_rtx_REG (DImode, REGNO (operands[0])));
+    }
 
   alpha_set_memflags (seq, operands[1]);
   emit_insn (seq);
   DONE;
@@ -4725,23 +4741,39 @@
 (define_expand "reload_inhi"
   [(parallel [(match_operand:HI 0 "register_operand" "=r")
-	      (match_operand:HI 1 "unaligned_memory_operand" "m")
+	      (match_operand:HI 1 "any_memory_operand" "m")
 	      (match_operand:TI 2 "register_operand" "=&r")])]
   "! TARGET_BWX"
   "
 {
-  rtx addr = get_unaligned_address (operands[1], 0);
+  rtx scratch, seq;
 
-  /* It is possible that one of the registers we got for operands[2]
-     might coincide with that of operands[0] (which is why we made
-     it TImode).  Pick the other one to use as our scratch.  */
-  rtx scratch = gen_rtx_REG (DImode,
-			     REGNO (operands[0]) == REGNO (operands[2])
-			     ? REGNO (operands[2]) + 1 : REGNO (operands[2]));
+  if (aligned_memory_operand (operands[1], HImode))
+    {
+      rtx aligned_mem, bitnum;
 
-  rtx seq = gen_unaligned_loadhi (operands[0], addr, scratch,
-				  gen_rtx_REG (DImode, REGNO (operands[0])));
+      get_aligned_mem (operands[1],
+		       gen_rtx_REG (DImode, REGNO (operands[2]) + 1),
+		       &aligned_mem, &bitnum);
+
+      seq = gen_aligned_loadhi (operands[0], aligned_mem, bitnum,
+				gen_rtx_REG (SImode, REGNO (operands[2])));
+    }
+  else
+    {
+      rtx addr;
+
+      /* It is possible that one of the registers we got for operands[2]
+	 might coincide with that of operands[0] (which is why we made
+	 it TImode).  Pick the other one to use as our scratch.  */
+      if (REGNO (operands[0]) == REGNO (operands[2]))
+	scratch = gen_rtx_REG (DImode, REGNO (operands[2]) + 1);
+      else
+	scratch = gen_rtx_REG (DImode, REGNO (operands[2]));
+
+      addr = get_unaligned_address (operands[1], 0);
+      seq = gen_unaligned_loadhi (operands[0], addr, scratch,
+				  gen_rtx_REG (DImode, REGNO (operands[0])));
+    }
 
   alpha_set_memflags (seq, operands[1]);
   emit_insn (seq);
   DONE;
@@ -4758,7 +4790,7 @@
 {
   rtx aligned_mem, bitnum;
 
-  get_aligned_mem (operands[0], &aligned_mem, &bitnum);
+  get_aligned_mem (operands[0], NULL_RTX, &aligned_mem, &bitnum);
 
   emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
 				gen_rtx_REG (SImode, REGNO (operands[2])),
@@ -4796,7 +4828,7 @@
 {
   rtx aligned_mem, bitnum;
 
-  get_aligned_mem (operands[0], &aligned_mem, &bitnum);
+  get_aligned_mem (operands[0], NULL_RTX, &aligned_mem, &bitnum);
 
   emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
 				gen_rtx_REG (SImode, REGNO (operands[2])),
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment